From bed7697178e237f9b048d38cf665e85175fd20f0 Mon Sep 17 00:00:00 2001 From: Nikhil Benesch Date: Wed, 22 Aug 2018 23:33:06 -0400 Subject: [PATCH 1/7] storage: check for merges in AdminSplit retry loop The retry loop in AdminSplit can span many seconds. In that time, the replica may lose its lease, or the range might be merged away entirely. In either of those cases, the split can never succeed, and so the retry loop needs to give up. The loop was properly exiting if it noticed it lost its lease, but a range can get merged away without losing its lease. The final lease on that range remains valid until the liveness epoch it is tied to expires. Teach the loop to notice that condition too by checking Replica.IsDestroyed on every turn of the loop. Release note: None --- pkg/storage/replica_command.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/pkg/storage/replica_command.go b/pkg/storage/replica_command.go index 479aeaf539ff..faeefb6fe741 100644 --- a/pkg/storage/replica_command.go +++ b/pkg/storage/replica_command.go @@ -150,6 +150,15 @@ func (r *Replica) AdminSplit( retryOpts := base.DefaultRetryOptions() retryOpts.MaxRetries = 10 for retryable := retry.StartWithCtx(ctx, retryOpts); retryable.Next(); { + // The replica may have been destroyed since the start of the retry loop. We + // need to explicitly check this condition. Having a valid lease, as we + // verify below, does not imply that the range still exists: even after a + // range has been merged into its left-hand neighbor, its final lease (i.e., + // the lease we have in r.mu.state.Lease) can remain valid indefinitely. + if _, err := r.IsDestroyed(); err != nil { + return reply, roachpb.NewError(err) + } + // Admin commands always require the range lease to begin (see // executeAdminBatch), but we may have lost it while in this retry loop. // Without the lease, a replica's local descriptor can be arbitrarily From ccc29bbfd2ffacb3a9dda8409813d9dcdd8b64a0 Mon Sep 17 00:00:00 2001 From: Nikhil Benesch Date: Wed, 22 Aug 2018 10:07:51 -0400 Subject: [PATCH 2/7] storage: remove MergeMaxRHSSize setting Now that merges do not include a snapshot of the RHS data in the merge trigger, we no longer need a setting limiting the size of the RHS of a merge. Release note: None --- pkg/storage/client_merge_test.go | 22 ---------------------- pkg/storage/merge_queue.go | 25 ++----------------------- 2 files changed, 2 insertions(+), 45 deletions(-) diff --git a/pkg/storage/client_merge_test.go b/pkg/storage/client_merge_test.go index 472c52ba3a44..85d317854d9a 100644 --- a/pkg/storage/client_merge_test.go +++ b/pkg/storage/client_merge_test.go @@ -2153,30 +2153,11 @@ func TestMergeQueue(t *testing.T) { verifyMerged(t) }) - t.Run("rhs-setting-threshold", func(t *testing.T) { - reset(t) - - if err := store.DB().Put(ctx, "b-key", "val"); err != nil { - t.Fatal(err) - } - store.ForceMergeScanAndProcess() - verifyUnmerged(t) - - storage.MergeMaxRHSSize.Override(sv, 100) - defer storage.MergeMaxRHSSize.Override(sv, storage.MergeMaxRHSSize.Default()) - store.ForceMergeScanAndProcess() - verifyMerged(t) - }) - rng, _ := randutil.NewPseudoRand() t.Run("rhs-replica-threshold", func(t *testing.T) { reset(t) - // Make the RHS cluster setting threshold irrelevant. 
- storage.MergeMaxRHSSize.Override(sv, 1<<32) - defer storage.MergeMaxRHSSize.Override(sv, storage.MergeMaxRHSSize.Default()) - bytes := randutil.RandBytes(rng, int(defaultZone.RangeMinBytes)) if err := store.DB().Put(ctx, "b-key", bytes); err != nil { t.Fatal(err) @@ -2207,9 +2188,6 @@ func TestMergeQueue(t *testing.T) { t.Run("combined-threshold", func(t *testing.T) { reset(t) - storage.MergeMaxRHSSize.Override(sv, 1<<32) - defer storage.MergeMaxRHSSize.Override(sv, storage.MergeMaxRHSSize.Default()) - // The ranges are individually beneath the minimum size threshold, but // together they'll exceed the maximum size threshold. setThresholds(200, 200) diff --git a/pkg/storage/merge_queue.go b/pkg/storage/merge_queue.go index 7bdf8aaeab26..2845f5ebdca0 100644 --- a/pkg/storage/merge_queue.go +++ b/pkg/storage/merge_queue.go @@ -50,18 +50,6 @@ const ( mergeQueueConcurrency = 1 ) -// MergeMaxRHSSize is a setting the controls the maximum size of the right-hand -// range in a merge. -var MergeMaxRHSSize = func() *settings.ByteSizeSetting { - s := settings.RegisterByteSizeSetting( - "kv.range_merge.max_rhs_size", - "maximum size of the right-hand range in a merge", - 0, - ) - s.Hide() - return s -}() - // MergeQueueEnabled is a setting that controls whether the merge queue is // enabled. var MergeQueueEnabled = func() *settings.BoolSetting { @@ -79,13 +67,9 @@ var MergeQueueEnabled = func() *settings.BoolSetting { // // A range will only be queued if it is beneath the minimum size threshold. Once // queued, the size of the right-hand neighbor will additionally be checked; -// merges can only proceed if a) the right-hand neighbor is smaller than -// MergeMaxRHSSize, and b) the merged range would not need to be immediately +// merges can only proceed if a) the right-hand neighbor is beneath the minimum +// size threshold, and b) the merged range would not need to be immediately // split, e.g. because the new range would exceed the maximum size threshold. -// Note that (a) is a limitation of the current merge implementation. The right- -// hand range's data must be rewritten into the left-hand range, even if the -// ranges are collocated, which results in quite a bit of write amplification. -// We hope to lift this restriction once this copy is unnecessary. // // Note that the merge queue is not capable of initiating all possible merges. // Consider the example below: @@ -232,11 +216,6 @@ func (mq *mergeQueue) process( minBytes, lhsStats.Total()) return nil } - if maxBytes := MergeMaxRHSSize.Get(&mq.store.ClusterSettings().SV); rhsStats.Total() > maxBytes { - log.VEventf(ctx, 2, "skipping merge: RHS exceeds maximum size %d with %d bytes", - maxBytes, rhsStats.Total()) - return nil - } mergedDesc := &roachpb.RangeDescriptor{ StartKey: lhsDesc.StartKey, From 4a3c814ee73c952c1538c21ea9f6c2c940d3cde1 Mon Sep 17 00:00:00 2001 From: Nikhil Benesch Date: Wed, 22 Aug 2018 10:17:15 -0400 Subject: [PATCH 3/7] storage: turn down default merge queue interval Merges are relatively expensive. Set the merge queue interval to one second so we avoid processing too many merges at once. Introduce a cluster setting to allow users/tests to adjust the merge queue interval if they so choose. Fix #27769. 
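As a usage sketch (helper name illustrative, not from the patch): a test that wants merges processed with no delay can zero the new interval through the settings override hooks, the same way TestMergeQueue does below. Here sv is assumed to be &storeCfg.Settings.SV.

    package storage_test

    import (
    	"time"

    	"github.com/cockroachdb/cockroach/pkg/settings"
    	"github.com/cockroachdb/cockroach/pkg/storage"
    )

    // configureMergeQueue is a hypothetical test helper: it enables the merge
    // queue and sets how long the queue waits between processing replicas.
    // Passing 0 makes the queue process merges greedily.
    func configureMergeQueue(sv *settings.Values, interval time.Duration) {
    	storage.MergeQueueEnabled.Override(sv, true)
    	storage.MergeQueueInterval.Override(sv, interval)
    }

Outside of tests, the same knob is reachable as the hidden cluster setting kv.range_merge.queue_interval.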
Release note: None --- pkg/storage/client_merge_test.go | 1 + pkg/storage/merge_queue.go | 22 ++++++++++++++-------- 2 files changed, 15 insertions(+), 8 deletions(-) diff --git a/pkg/storage/client_merge_test.go b/pkg/storage/client_merge_test.go index 85d317854d9a..cdf838cd861f 100644 --- a/pkg/storage/client_merge_test.go +++ b/pkg/storage/client_merge_test.go @@ -2085,6 +2085,7 @@ func TestMergeQueue(t *testing.T) { storeCfg.TestingKnobs.DisableScanner = true sv := &storeCfg.Settings.SV storage.MergeQueueEnabled.Override(sv, true) + storage.MergeQueueInterval.Override(sv, 0) // process greedily var mtc multiTestContext mtc.storeConfig = &storeCfg mtc.Start(t, 2) diff --git a/pkg/storage/merge_queue.go b/pkg/storage/merge_queue.go index 2845f5ebdca0..71f0542bfafd 100644 --- a/pkg/storage/merge_queue.go +++ b/pkg/storage/merge_queue.go @@ -32,12 +32,6 @@ import ( ) const ( - // mergeQueueTimerDuration is the duration between merges of queued ranges. - // - // TODO(benesch): rate-limit merges before 2.1 is released. It's currently set - // aggressively to smoke out problems in alphas. - mergeQueueTimerDuration = 0 - // mergeQueuePurgatoryCheckInterval is the interval at which replicas in // purgatory make merge attempts. Since merges are relatively untested, the // reasons that a range may fail to merge are unknown, so the merge queue has @@ -62,6 +56,18 @@ var MergeQueueEnabled = func() *settings.BoolSetting { return s }() +// MergeQueueInterval is a setting that controls how often the merge queue waits +// between processing replicas. +var MergeQueueInterval = func() *settings.DurationSetting { + s := settings.RegisterNonNegativeDurationSetting( + "kv.range_merge.queue_interval", + "how long the merge queue waits between processing replicas", + time.Second, + ) + s.Hide() + return s +}() + // mergeQueue manages a queue of ranges slated to be merged with their right- // hand neighbor. // @@ -274,8 +280,8 @@ func (mq *mergeQueue) process( return nil } -func (*mergeQueue) timer(time.Duration) time.Duration { - return mergeQueueTimerDuration +func (mq *mergeQueue) timer(time.Duration) time.Duration { + return MergeQueueInterval.Get(&mq.store.ClusterSettings().SV) } func (mq *mergeQueue) purgatoryChan() <-chan time.Time { From 4a157209213b2f6ebc71dce5c7ab8af36897c9e3 Mon Sep 17 00:00:00 2001 From: Nikhil Benesch Date: Wed, 22 Aug 2018 10:47:40 -0400 Subject: [PATCH 4/7] storage: update zone config installation in TestSystemZoneConfigs Teach TestSystemZoneConfigs to install zone configs via SQL, rather than the hacky testing override system, which interacts poorly with the forthcoming on-by-default merge queue. 
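For context, a rough sketch of what the sqlutils.SetZoneConfig helper used below presumably does: issue the zone-config statement for the given target with a YAML body. The EXPERIMENTAL CONFIGURE ZONE syntax here is an assumption inferred from the YAML-style argument ("num_replicas: 5"), not something this patch adds.

    package sqlutils

    import (
    	"fmt"
    	"testing"
    )

    // setZoneConfigSketch approximates what SetZoneConfig is assumed to run
    // under the hood, e.g.
    //   setZoneConfigSketch(t, sqlDB, "RANGE meta", "num_replicas: 5")
    func setZoneConfigSketch(t *testing.T, db *SQLRunner, target, yaml string) {
    	db.Exec(t, fmt.Sprintf("ALTER %s EXPERIMENTAL CONFIGURE ZONE '%s'", target, yaml))
    }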
Release note: None --- pkg/storage/client_replica_test.go | 18 +++++------------- 1 file changed, 5 insertions(+), 13 deletions(-) diff --git a/pkg/storage/client_replica_test.go b/pkg/storage/client_replica_test.go index 656f414ee55c..b1a4effd9e82 100644 --- a/pkg/storage/client_replica_test.go +++ b/pkg/storage/client_replica_test.go @@ -40,6 +40,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/storage/storagebase" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" + "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" "github.com/cockroachdb/cockroach/pkg/testutils/testcluster" "github.com/cockroachdb/cockroach/pkg/util/caller" "github.com/cockroachdb/cockroach/pkg/util/hlc" @@ -1796,33 +1797,24 @@ func TestSystemZoneConfigs(t *testing.T) { testutils.SucceedsSoon(t, waitForReplicas) log.Info(ctx, "TestSystemZoneConfig: initial replication succeeded") - // Allow for inserting zone configs without having to go through (or - // duplicate the logic from) the CLI. - config.TestingSetupZoneConfigHook(tc.Stopper()) - // Update the meta zone config to have more replicas and expect the number // of replicas to go up accordingly after running all replicas through the // replicate queue. - zoneConfig := config.DefaultZoneConfig() - zoneConfig.NumReplicas += 2 - config.TestingSetZoneConfig(keys.MetaRangesID, zoneConfig) + sqlDB := sqlutils.MakeSQLRunner(tc.ServerConn(0)) + sqlutils.SetZoneConfig(t, sqlDB, "RANGE meta", "num_replicas: 5") expectedReplicas += 2 testutils.SucceedsSoon(t, waitForReplicas) log.Info(ctx, "TestSystemZoneConfig: up-replication of meta ranges succeeded") // Do the same thing, but down-replicating the timeseries range. - zoneConfig = config.DefaultZoneConfig() - zoneConfig.NumReplicas -= 2 - config.TestingSetZoneConfig(keys.TimeseriesRangesID, zoneConfig) + sqlutils.SetZoneConfig(t, sqlDB, "RANGE timeseries", "num_replicas: 1") expectedReplicas -= 2 testutils.SucceedsSoon(t, waitForReplicas) log.Info(ctx, "TestSystemZoneConfig: down-replication of timeseries ranges succeeded") // Finally, verify the system ranges. Note that in a new cluster there are // two system ranges, which we have to take into account here. - zoneConfig = config.DefaultZoneConfig() - zoneConfig.NumReplicas += 2 - config.TestingSetZoneConfig(keys.SystemRangesID, zoneConfig) + sqlutils.SetZoneConfig(t, sqlDB, "RANGE system", "num_replicas: 5") expectedReplicas += 6 testutils.SucceedsSoon(t, waitForReplicas) log.Info(ctx, "TestSystemZoneConfig: up-replication of system ranges succeeded") From 2e602f575d80ec99f3e02295082cc3355da7fcd6 Mon Sep 17 00:00:00 2001 From: Nikhil Benesch Date: Wed, 22 Aug 2018 11:03:46 -0400 Subject: [PATCH 5/7] storage: prepare to enable merge queue by default Turn off the merge queue in all tests that need it. The actual default will be changed in a separate PR so that this commit can be safely backported to release-2.1. 
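The tests below use one of two mechanisms to keep their manual splits from being merged away; a condensed sketch of both follows (the wrapper function names are illustrative, not from the patch).

    package storage_test

    import (
    	gosql "database/sql"

    	"github.com/cockroachdb/cockroach/pkg/base"
    	"github.com/cockroachdb/cockroach/pkg/storage"
    )

    // Option 1: tests with a SQL connection flip the cluster setting before
    // splitting, as the logic tests and workload helpers below do.
    func disableMergeQueueViaSQL(db *gosql.DB) error {
    	_, err := db.Exec(`SET CLUSTER SETTING kv.range_merge.queue_enabled = false`)
    	return err
    }

    // Option 2: tests that construct a store directly set the testing knob
    // before startup, as multiTestContext and TestReplicateQueueDownReplicate
    // below do.
    func disableMergeQueueViaKnob(knobs *base.TestingKnobs) {
    	knobs.Store = &storage.StoreTestingKnobs{DisableMergeQueue: true}
    }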
Release note: None --- docs/generated/settings/settings.html | 1 + pkg/ccl/storageccl/export_test.go | 2 ++ pkg/cmd/roachtest/election.go | 2 ++ pkg/sql/distsql_physical_planner_test.go | 17 +++++++++++++++-- pkg/sql/distsqlrun/cluster_test.go | 2 ++ pkg/sql/distsqlrun/tablereader_test.go | 7 +++++++ .../logictest/testdata/logic_test/distsql_agg | 4 ++++ .../testdata/logic_test/distsql_distinct_on | 4 ++++ .../logic_test/distsql_interleaved_join | 4 ++++ .../testdata/logic_test/distsql_lookup_join | 8 ++++++++ .../testdata/logic_test/distsql_numtables | 4 ++++ .../logictest/testdata/logic_test/distsql_stats | 4 ++++ .../testdata/logic_test/distsql_tighten_spans | 4 ++++ .../logictest/testdata/logic_test/distsql_union | 4 ++++ .../testdata/logic_test/explain_analyze | 4 ++++ pkg/sql/logictest/testdata/logic_test/ranges | 4 ++++ .../logic_test/select_index_span_ranges | 4 ++++ pkg/sql/logictest/testdata/logic_test/subquery | 4 ++++ .../logictest/testdata/planner_test/distsql_agg | 4 ++++ .../testdata/planner_test/distsql_distinct_on | 4 ++++ .../testdata/planner_test/distsql_indexjoin | 4 ++++ .../planner_test/distsql_interleaved_join | 4 ++++ .../testdata/planner_test/distsql_join | 4 ++++ .../testdata/planner_test/distsql_lookup_join | 4 ++++ .../testdata/planner_test/distsql_misc | 4 ++++ .../testdata/planner_test/distsql_numtables | 4 ++++ .../testdata/planner_test/distsql_sort | 4 ++++ .../testdata/planner_test/distsql_srfs | 4 ++++ .../testdata/planner_test/distsql_tighten_spans | 4 ++++ .../testdata/planner_test/distsql_union | 4 ++++ .../testdata/planner_test/distsql_window | 4 ++++ .../logictest/testdata/planner_test/subquery | 4 ++++ .../opt/exec/execbuilder/testdata/distsql_agg | 4 ++++ .../execbuilder/testdata/distsql_distinct_on | 4 ++++ .../exec/execbuilder/testdata/distsql_indexjoin | 4 ++++ .../testdata/distsql_interleaved_join | 4 ++++ .../opt/exec/execbuilder/testdata/distsql_join | 4 ++++ .../opt/exec/execbuilder/testdata/distsql_misc | 4 ++++ .../exec/execbuilder/testdata/distsql_numtables | 4 ++++ .../execbuilder/testdata/distsql_tighten_spans | 4 ++++ .../opt/exec/execbuilder/testdata/distsql_union | 4 ++++ .../opt/exec/execbuilder/testdata/lookup_join | 4 ++++ pkg/sql/opt/exec/execbuilder/testdata/subquery | 4 ++++ pkg/sql/run_control_test.go | 5 +++++ pkg/sql/scatter_test.go | 5 +++++ pkg/sql/trace_test.go | 5 +++++ pkg/sql/txn_restart_test.go | 2 ++ pkg/storage/client_merge_test.go | 1 + pkg/storage/client_test.go | 1 + pkg/storage/helpers_test.go | 5 +++++ pkg/storage/merge_queue.go | 14 +++++--------- pkg/storage/replica_test.go | 4 +++- pkg/storage/replicate_queue_test.go | 6 ++++++ pkg/workload/tpcc/ddls.go | 5 +++++ pkg/workload/workload.go | 5 +++++ 55 files changed, 229 insertions(+), 12 deletions(-) diff --git a/docs/generated/settings/settings.html b/docs/generated/settings/settings.html index 58b2f6ece93b..10fa5d7b8d44 100644 --- a/docs/generated/settings/settings.html +++ b/docs/generated/settings/settings.html @@ -31,6 +31,7 @@ kv.raft_log.synchronizebooleantrueset to true to synchronize on Raft log writes to persistent storage ('false' risks data loss) kv.range.backpressure_range_size_multiplierfloat2multiple of range_max_bytes that a range is allowed to grow to without splitting before writes to that range are blocked, or 0 to disable kv.range_descriptor_cache.sizeinteger1000000maximum number of entries in the range descriptor and leaseholder caches +kv.range_merge.queue_enabledbooleanfalsewhether the automatic merge queue is enabled 
kv.rangefeed.enabledbooleanfalseif set, rangefeed registration is enabled kv.snapshot_rebalance.max_ratebyte size2.0 MiBthe rate limit (bytes/sec) to use for rebalance snapshots kv.snapshot_recovery.max_ratebyte size8.0 MiBthe rate limit (bytes/sec) to use for recovery snapshots diff --git a/pkg/ccl/storageccl/export_test.go b/pkg/ccl/storageccl/export_test.go index e09eb8ddf2ab..43196bb298c4 100644 --- a/pkg/ccl/storageccl/export_test.go +++ b/pkg/ccl/storageccl/export_test.go @@ -167,6 +167,8 @@ func TestExportCmd(t *testing.T) { var res5 ExportAndSlurpResult t.Run("ts5", func(t *testing.T) { + // Prevent the merge queue from immediately discarding our splits. + sqlDB.Exec(t, `SET CLUSTER SETTING kv.range_merge.queue_enabled = false`) sqlDB.Exec(t, `ALTER TABLE mvcclatest.export SPLIT AT VALUES (2)`) res5 = exportAndSlurp(t, hlc.Timestamp{}) expect(t, res5, 2, 2, 2, 7) diff --git a/pkg/cmd/roachtest/election.go b/pkg/cmd/roachtest/election.go index da3388d6b6d4..aad49719eaf1 100644 --- a/pkg/cmd/roachtest/election.go +++ b/pkg/cmd/roachtest/election.go @@ -36,6 +36,8 @@ func registerElectionAfterRestart(r *registry) { c.Run(ctx, c.Node(1), `./cockroach sql --insecure -e " CREATE DATABASE IF NOT EXISTS test; CREATE TABLE test.kv (k INT PRIMARY KEY, v INT); + -- Prevent the merge queue from immediately discarding our splits. + SET CLUSTER SETTING kv.range_merge.queue_enabled = false; ALTER TABLE test.kv SPLIT AT SELECT generate_series(0, 10000, 100)"`) start := timeutil.Now() diff --git a/pkg/sql/distsql_physical_planner_test.go b/pkg/sql/distsql_physical_planner_test.go index 6673ddcff024..7993dfd7dcc9 100644 --- a/pkg/sql/distsql_physical_planner_test.go +++ b/pkg/sql/distsql_physical_planner_test.go @@ -71,6 +71,11 @@ func SplitTable( t.Fatal(err) } + // Prevent the merge queue from immediately discarding our split. + if _, err := tc.ServerConn(0).Exec("SET CLUSTER SETTING kv.range_merge.queue_enabled = false"); err != nil { + t.Fatal(err) + } + _, rightRange, err := tc.Server(0).SplitRange(pik) if err != nil { t.Fatal(err) @@ -88,8 +93,8 @@ func SplitTable( } // TestPlanningDuringSplits verifies that table reader planning (resolving -// spans) tolerates concurrent splits. -func TestPlanningDuringSplits(t *testing.T) { +// spans) tolerates concurrent splits and merges. +func TestPlanningDuringSplitsAndMerges(t *testing.T) { defer leaktest.AfterTest(t)() const n = 100 @@ -316,6 +321,8 @@ func TestDistSQLRangeCachesIntegrationTest(t *testing.T) { // We're going to split one of the tables, but node 4 is unaware of this. _, err = db0.Exec(fmt.Sprintf(` + -- Prevent the merge queue from immediately discarding our splits. + SET CLUSTER SETTING kv.range_merge.queue_enabled = false; ALTER TABLE "right" SPLIT AT VALUES (1), (2), (3); ALTER TABLE "right" EXPERIMENTAL_RELOCATE VALUES (ARRAY[%d], 1), (ARRAY[%d], 2), (ARRAY[%d], 3); `, @@ -397,6 +404,9 @@ func TestDistSQLDeadHosts(t *testing.T) { r.Exec(t, "CREATE TABLE t (x INT PRIMARY KEY, xsquared INT)") + // Prevent the merge queue from immediately discarding our splits. + r.Exec(t, "SET CLUSTER SETTING kv.range_merge.queue_enabled = false") + for i := 0; i < numNodes; i++ { r.Exec(t, fmt.Sprintf("ALTER TABLE t SPLIT AT VALUES (%d)", n*i/5)) } @@ -484,6 +494,9 @@ func TestDistSQLDrainingHosts(t *testing.T) { r := sqlutils.MakeSQLRunner(tc.ServerConn(0)) r.DB.SetMaxOpenConns(1) + // Prevent the merge queue from immediately discarding our splits. 
+ r.Exec(t, "SET CLUSTER SETTING kv.range_merge.queue_enabled = false") + r.Exec(t, "SET DISTSQL = ON") // Force the query to be distributed. r.Exec( diff --git a/pkg/sql/distsqlrun/cluster_test.go b/pkg/sql/distsqlrun/cluster_test.go index d100fc0c692d..cf27fc982cec 100644 --- a/pkg/sql/distsqlrun/cluster_test.go +++ b/pkg/sql/distsqlrun/cluster_test.go @@ -563,6 +563,8 @@ func TestDistSQLReadsFillGatewayID(t *testing.T) { sqlutils.ToRowFn(sqlutils.RowIdxFn)) if _, err := db.Exec(` +-- Prevent the merge queue from immediately discarding our splits. +SET CLUSTER SETTING kv.range_merge.queue_enabled = false; ALTER TABLE t SPLIT AT VALUES (1), (2), (3); ALTER TABLE t EXPERIMENTAL_RELOCATE VALUES (ARRAY[2], 1), (ARRAY[1], 2), (ARRAY[3], 3); `); err != nil { diff --git a/pkg/sql/distsqlrun/tablereader_test.go b/pkg/sql/distsqlrun/tablereader_test.go index 29202f27127c..7c0fae73e568 100644 --- a/pkg/sql/distsqlrun/tablereader_test.go +++ b/pkg/sql/distsqlrun/tablereader_test.go @@ -197,6 +197,8 @@ func TestMisplannedRangesMetadata(t *testing.T) { sqlutils.ToRowFn(sqlutils.RowIdxFn)) _, err := db.Exec(` +-- Prevent the merge queue from immediately discarding our splits. +SET CLUSTER SETTING kv.range_merge.queue_enabled = false; ALTER TABLE t SPLIT AT VALUES (1), (2), (3); ALTER TABLE t EXPERIMENTAL_RELOCATE VALUES (ARRAY[2], 1), (ARRAY[1], 2), (ARRAY[3], 3); `) @@ -307,6 +309,11 @@ func TestLimitScans(t *testing.T) { 100, /* numRows */ sqlutils.ToRowFn(sqlutils.RowIdxFn)) + // Prevent the merge queue from immediately discarding our splits. + if _, err := sqlDB.Exec("SET CLUSTER SETTING kv.range_merge.queue_enabled = false"); err != nil { + t.Fatal(err) + } + if _, err := sqlDB.Exec("ALTER TABLE t SPLIT AT VALUES (5)"); err != nil { t.Fatal(err) } diff --git a/pkg/sql/logictest/testdata/logic_test/distsql_agg b/pkg/sql/logictest/testdata/logic_test/distsql_agg index ed1bff5a5f2e..537d0eff5427 100644 --- a/pkg/sql/logictest/testdata/logic_test/distsql_agg +++ b/pkg/sql/logictest/testdata/logic_test/distsql_agg @@ -3,6 +3,10 @@ statement ok CREATE TABLE data (a INT, b INT, c FLOAT, d DECIMAL, PRIMARY KEY (a, b, c, d)) +# Prevent the merge queue from immediately discarding our splits. +statement ok +SET CLUSTER SETTING kv.range_merge.queue_enabled = false; + # Split into ten parts. statement ok ALTER TABLE data SPLIT AT SELECT i FROM generate_series(1, 9) AS g(i) diff --git a/pkg/sql/logictest/testdata/logic_test/distsql_distinct_on b/pkg/sql/logictest/testdata/logic_test/distsql_distinct_on index b49897fa06ab..0750b70f9587 100644 --- a/pkg/sql/logictest/testdata/logic_test/distsql_distinct_on +++ b/pkg/sql/logictest/testdata/logic_test/distsql_distinct_on @@ -34,6 +34,10 @@ INSERT INTO abc VALUES ('2', '3', '4'), ('3', '4', '5') +# Prevent the merge queue from immediately discarding our splits. +statement ok +SET CLUSTER SETTING kv.range_merge.queue_enabled = false; + statement ok ALTER TABLE xyz SPLIT AT VALUES (2), (4), (6), (7) diff --git a/pkg/sql/logictest/testdata/logic_test/distsql_interleaved_join b/pkg/sql/logictest/testdata/logic_test/distsql_interleaved_join index 9b7e15d62560..9ada1f5538af 100644 --- a/pkg/sql/logictest/testdata/logic_test/distsql_interleaved_join +++ b/pkg/sql/logictest/testdata/logic_test/distsql_interleaved_join @@ -168,6 +168,10 @@ FROM # Split our ranges # #################### +# Prevent the merge queue from immediately discarding our splits. +statement ok +SET CLUSTER SETTING kv.range_merge.queue_enabled = false; + # Split at parent1 key into five parts. 
statement ok ALTER TABLE parent1 SPLIT AT SELECT i FROM generate_series(8, 32, 8) AS g(i) diff --git a/pkg/sql/logictest/testdata/logic_test/distsql_lookup_join b/pkg/sql/logictest/testdata/logic_test/distsql_lookup_join index 1d5cd5487f01..7deb55b7fd81 100644 --- a/pkg/sql/logictest/testdata/logic_test/distsql_lookup_join +++ b/pkg/sql/logictest/testdata/logic_test/distsql_lookup_join @@ -9,6 +9,10 @@ SET experimental_force_lookup_join = true; statement ok CREATE TABLE data (a INT, b INT, c INT, d INT, PRIMARY KEY (a, b, c, d)) +# Prevent the merge queue from immediately discarding our splits. +statement ok +SET CLUSTER SETTING kv.range_merge.queue_enabled = false; + # Split into ten parts. statement ok ALTER TABLE data SPLIT AT SELECT i FROM generate_series(1, 9) AS g(i) @@ -227,6 +231,10 @@ true statement ok CREATE TABLE multiples (a INT, b INT, c INT, d INT, PRIMARY KEY (a, b), INDEX bc (b) STORING (c)) +# Prevent the merge queue from immediately discarding our splits. +statement ok +SET CLUSTER SETTING kv.range_merge.queue_enabled = false; + # Split into ten parts. statement ok ALTER TABLE multiples SPLIT AT SELECT i FROM generate_series(1, 9) AS g(i) diff --git a/pkg/sql/logictest/testdata/logic_test/distsql_numtables b/pkg/sql/logictest/testdata/logic_test/distsql_numtables index b3aed1b1c4af..7cde62705f2e 100644 --- a/pkg/sql/logictest/testdata/logic_test/distsql_numtables +++ b/pkg/sql/logictest/testdata/logic_test/distsql_numtables @@ -13,6 +13,10 @@ INSERT INTO NumToSquare SELECT i, i*i FROM generate_series(1, 100) AS g(i) statement ok CREATE TABLE NumToStr (y INT PRIMARY KEY, str STRING) +# Prevent the merge queue from immediately discarding our splits. +statement ok +SET CLUSTER SETTING kv.range_merge.queue_enabled = false; + # Split into five parts. statement ok ALTER TABLE NumToStr SPLIT AT SELECT (i * 100 * 100 / 5)::int FROM generate_series(1, 4) AS g(i) diff --git a/pkg/sql/logictest/testdata/logic_test/distsql_stats b/pkg/sql/logictest/testdata/logic_test/distsql_stats index 22b835cc27c3..b484c33d52b1 100644 --- a/pkg/sql/logictest/testdata/logic_test/distsql_stats +++ b/pkg/sql/logictest/testdata/logic_test/distsql_stats @@ -3,6 +3,10 @@ statement ok CREATE TABLE data (a INT, b INT, c FLOAT, d DECIMAL, PRIMARY KEY (a, b, c, d)) +# Prevent the merge queue from immediately discarding our splits. +statement ok +SET CLUSTER SETTING kv.range_merge.queue_enabled = false; + # Split into ten parts. statement ok ALTER TABLE data SPLIT AT SELECT i FROM generate_series(1, 9) AS g(i) diff --git a/pkg/sql/logictest/testdata/logic_test/distsql_tighten_spans b/pkg/sql/logictest/testdata/logic_test/distsql_tighten_spans index f7d454d8ed0e..78a0459e09fb 100644 --- a/pkg/sql/logictest/testdata/logic_test/distsql_tighten_spans +++ b/pkg/sql/logictest/testdata/logic_test/distsql_tighten_spans @@ -99,6 +99,10 @@ INSERT INTO decimal_t VALUES # Also split at the beginning of each index (0 for ASC, 100 for DESC) to # prevent interfering with previous indexes/tables. +# Prevent the merge queue from immediately discarding our splits. 
+statement ok +SET CLUSTER SETTING kv.range_merge.queue_enabled = false; + # p1 table (interleaved index) statement ok ALTER TABLE p1 SPLIT AT VALUES(2) diff --git a/pkg/sql/logictest/testdata/logic_test/distsql_union b/pkg/sql/logictest/testdata/logic_test/distsql_union index ec1e6551cd14..3c9e7e6a9c5d 100644 --- a/pkg/sql/logictest/testdata/logic_test/distsql_union +++ b/pkg/sql/logictest/testdata/logic_test/distsql_union @@ -16,6 +16,10 @@ INSERT INTO xyz VALUES (4, 2, 'b'), (5, 2, 'c') +# Prevent the merge queue from immediately discarding our splits. +statement ok +SET CLUSTER SETTING kv.range_merge.queue_enabled = false; + statement ok ALTER TABLE xyz SPLIT AT VALUES (2), (3), (4), (5) diff --git a/pkg/sql/logictest/testdata/logic_test/explain_analyze b/pkg/sql/logictest/testdata/logic_test/explain_analyze index 522d18f8a8bf..40705f001102 100644 --- a/pkg/sql/logictest/testdata/logic_test/explain_analyze +++ b/pkg/sql/logictest/testdata/logic_test/explain_analyze @@ -49,6 +49,10 @@ CREATE TABLE kw (k INT PRIMARY KEY, w INT) statement ok INSERT INTO kw SELECT i, i FROM generate_series(1,5) AS g(i) +# Prevent the merge queue from immediately discarding our splits. +statement ok +SET CLUSTER SETTING kv.range_merge.queue_enabled = false; + # Split into 5 parts, each row from each table goes to one node. statement ok ALTER TABLE kv SPLIT AT SELECT i FROM generate_series(1,5) AS g(i) diff --git a/pkg/sql/logictest/testdata/logic_test/ranges b/pkg/sql/logictest/testdata/logic_test/ranges index 89c8fce58df4..35da2e1a3dc5 100644 --- a/pkg/sql/logictest/testdata/logic_test/ranges +++ b/pkg/sql/logictest/testdata/logic_test/ranges @@ -9,6 +9,10 @@ SHOW EXPERIMENTAL_RANGES FROM TABLE t start_key end_key range_id replicas lease_holder NULL NULL 1 {1} 1 +# Prevent the merge queue from immediately discarding our splits. +statement ok +SET CLUSTER SETTING kv.range_merge.queue_enabled = false; + statement ok ALTER TABLE t SPLIT AT VALUES (1), (10) diff --git a/pkg/sql/logictest/testdata/logic_test/select_index_span_ranges b/pkg/sql/logictest/testdata/logic_test/select_index_span_ranges index ce5097eab2d1..0236178a36ec 100644 --- a/pkg/sql/logictest/testdata/logic_test/select_index_span_ranges +++ b/pkg/sql/logictest/testdata/logic_test/select_index_span_ranges @@ -35,6 +35,10 @@ INSERT INTO t VALUES (12, 0, 88, 0), (13, 0, 13, 0) +# Prevent the merge queue from immediately discarding our splits. +statement ok +SET CLUSTER SETTING kv.range_merge.queue_enabled = false; + # Split the table across multiple ranges. statement ok ALTER TABLE t SPLIT AT VALUES (2) diff --git a/pkg/sql/logictest/testdata/logic_test/subquery b/pkg/sql/logictest/testdata/logic_test/subquery index 3bea232dd18a..eeb0addd5b14 100644 --- a/pkg/sql/logictest/testdata/logic_test/subquery +++ b/pkg/sql/logictest/testdata/logic_test/subquery @@ -157,6 +157,10 @@ CREATE TABLE abc (a INT PRIMARY KEY, b INT, c INT) statement ok INSERT INTO abc VALUES (1, 2, 3), (4, 5, 6) +# Prevent the merge queue from immediately discarding our splits. 
+statement ok +SET CLUSTER SETTING kv.range_merge.queue_enabled = false; + statement ok ALTER TABLE abc SPLIT AT VALUES ((SELECT 1)) diff --git a/pkg/sql/logictest/testdata/planner_test/distsql_agg b/pkg/sql/logictest/testdata/planner_test/distsql_agg index 268092d5ab04..e191824d1645 100644 --- a/pkg/sql/logictest/testdata/planner_test/distsql_agg +++ b/pkg/sql/logictest/testdata/planner_test/distsql_agg @@ -3,6 +3,10 @@ statement ok CREATE TABLE data (a INT, b INT, c FLOAT, d DECIMAL, PRIMARY KEY (a, b, c, d)) +# Prevent the merge queue from immediately discarding our splits. +statement ok +SET CLUSTER SETTING kv.range_merge.queue_enabled = false; + # Split into ten parts. statement ok ALTER TABLE data SPLIT AT SELECT i FROM generate_series(1, 9) AS g(i) diff --git a/pkg/sql/logictest/testdata/planner_test/distsql_distinct_on b/pkg/sql/logictest/testdata/planner_test/distsql_distinct_on index 1da4d71ffc98..5f2d1798f6e2 100644 --- a/pkg/sql/logictest/testdata/planner_test/distsql_distinct_on +++ b/pkg/sql/logictest/testdata/planner_test/distsql_distinct_on @@ -16,6 +16,10 @@ CREATE TABLE abc ( PRIMARY KEY (a, b, c) ) +# Prevent the merge queue from immediately discarding our splits. +statement ok +SET CLUSTER SETTING kv.range_merge.queue_enabled = false; + statement ok ALTER TABLE xyz SPLIT AT VALUES (2), (4), (6), (7) diff --git a/pkg/sql/logictest/testdata/planner_test/distsql_indexjoin b/pkg/sql/logictest/testdata/planner_test/distsql_indexjoin index 16cb443cae43..bfa44c3c4dbc 100644 --- a/pkg/sql/logictest/testdata/planner_test/distsql_indexjoin +++ b/pkg/sql/logictest/testdata/planner_test/distsql_indexjoin @@ -3,6 +3,10 @@ statement ok CREATE TABLE t (k INT PRIMARY KEY, v INT, w INT, INDEX v(v)) +# Prevent the merge queue from immediately discarding our splits. +statement ok +SET CLUSTER SETTING kv.range_merge.queue_enabled = false; + # Split the index into 5 parts, as if numbers were in the range 1 to 100. statement ok ALTER INDEX t@v SPLIT AT SELECT (i * 10)::int FROM generate_series(1, 4) AS g(i) diff --git a/pkg/sql/logictest/testdata/planner_test/distsql_interleaved_join b/pkg/sql/logictest/testdata/planner_test/distsql_interleaved_join index be229a030e31..dffa8fd4d1c3 100644 --- a/pkg/sql/logictest/testdata/planner_test/distsql_interleaved_join +++ b/pkg/sql/logictest/testdata/planner_test/distsql_interleaved_join @@ -83,6 +83,10 @@ INTERLEAVE IN PARENT child2 (pid1, cid2, cid3) # Split our ranges # #################### +# Prevent the merge queue from immediately discarding our splits. +statement ok +SET CLUSTER SETTING kv.range_merge.queue_enabled = false; + # Split at parent1 key into five parts. statement ok ALTER TABLE parent1 SPLIT AT SELECT i FROM generate_series(8, 32, 8) AS g(i) diff --git a/pkg/sql/logictest/testdata/planner_test/distsql_join b/pkg/sql/logictest/testdata/planner_test/distsql_join index 0740ff8c863e..3830f1d8129f 100644 --- a/pkg/sql/logictest/testdata/planner_test/distsql_join +++ b/pkg/sql/logictest/testdata/planner_test/distsql_join @@ -3,6 +3,10 @@ statement ok CREATE TABLE data (a INT, b INT, c INT, d INT, PRIMARY KEY (a, b, c, d)) +# Prevent the merge queue from immediately discarding our splits. +statement ok +SET CLUSTER SETTING kv.range_merge.queue_enabled = false; + # Split into ten parts. 
statement ok ALTER TABLE data SPLIT AT SELECT i FROM generate_series(1, 9) AS g(i) diff --git a/pkg/sql/logictest/testdata/planner_test/distsql_lookup_join b/pkg/sql/logictest/testdata/planner_test/distsql_lookup_join index 72a0866cf134..11426c07159d 100644 --- a/pkg/sql/logictest/testdata/planner_test/distsql_lookup_join +++ b/pkg/sql/logictest/testdata/planner_test/distsql_lookup_join @@ -9,6 +9,10 @@ SET experimental_force_lookup_join = true; statement ok CREATE TABLE data (a INT, b INT, c INT, d INT, PRIMARY KEY (a, b, c, d)) +# Prevent the merge queue from immediately discarding our splits. +statement ok +SET CLUSTER SETTING kv.range_merge.queue_enabled = false; + # Split into ten parts. statement ok ALTER TABLE data SPLIT AT SELECT i FROM generate_series(1, 9) AS g(i) diff --git a/pkg/sql/logictest/testdata/planner_test/distsql_misc b/pkg/sql/logictest/testdata/planner_test/distsql_misc index 780e893f9a7f..ecc715f20d3a 100644 --- a/pkg/sql/logictest/testdata/planner_test/distsql_misc +++ b/pkg/sql/logictest/testdata/planner_test/distsql_misc @@ -57,6 +57,10 @@ subtest stats statement ok CREATE TABLE data (a INT, b INT, c FLOAT, d DECIMAL, PRIMARY KEY (a, b, c, d)) +# Prevent the merge queue from immediately discarding our splits. +statement ok +SET CLUSTER SETTING kv.range_merge.queue_enabled = false; + # Split into ten parts. statement ok ALTER TABLE data SPLIT AT SELECT i FROM generate_series(1, 9) AS g(i) diff --git a/pkg/sql/logictest/testdata/planner_test/distsql_numtables b/pkg/sql/logictest/testdata/planner_test/distsql_numtables index 718801989e39..295b373a8041 100644 --- a/pkg/sql/logictest/testdata/planner_test/distsql_numtables +++ b/pkg/sql/logictest/testdata/planner_test/distsql_numtables @@ -13,6 +13,10 @@ INSERT INTO NumToSquare SELECT i, i*i FROM generate_series(1, 100) AS g(i) statement ok CREATE TABLE NumToStr (y INT PRIMARY KEY, str STRING) +# Prevent the merge queue from immediately discarding our splits. +statement ok +SET CLUSTER SETTING kv.range_merge.queue_enabled = false; + # Split into five parts. statement ok ALTER TABLE NumToStr SPLIT AT SELECT (i * 100 * 100 / 5)::int FROM generate_series(1, 4) AS g(i) diff --git a/pkg/sql/logictest/testdata/planner_test/distsql_sort b/pkg/sql/logictest/testdata/planner_test/distsql_sort index 56e92ed1b3ce..4dac0e3bcc2b 100644 --- a/pkg/sql/logictest/testdata/planner_test/distsql_sort +++ b/pkg/sql/logictest/testdata/planner_test/distsql_sort @@ -3,6 +3,10 @@ statement ok CREATE TABLE data (a INT, b INT, c INT, d INT, PRIMARY KEY (a, b, c, d)) +# Prevent the merge queue from immediately discarding our splits. +statement ok +SET CLUSTER SETTING kv.range_merge.queue_enabled = false; + # Split into ten parts. statement ok ALTER TABLE data SPLIT AT SELECT i FROM generate_series(1, 9) AS g(i) diff --git a/pkg/sql/logictest/testdata/planner_test/distsql_srfs b/pkg/sql/logictest/testdata/planner_test/distsql_srfs index 77484f166c17..1c5de94ac147 100644 --- a/pkg/sql/logictest/testdata/planner_test/distsql_srfs +++ b/pkg/sql/logictest/testdata/planner_test/distsql_srfs @@ -3,6 +3,10 @@ statement ok CREATE TABLE data (a INT PRIMARY KEY) +# Prevent the merge queue from immediately discarding our splits. +statement ok +SET CLUSTER SETTING kv.range_merge.queue_enabled = false; + # Split into ten parts. 
statement ok ALTER TABLE data SPLIT AT SELECT i FROM generate_series(1, 9) AS g(i) diff --git a/pkg/sql/logictest/testdata/planner_test/distsql_tighten_spans b/pkg/sql/logictest/testdata/planner_test/distsql_tighten_spans index 7cf137622df3..d84e5d393db3 100644 --- a/pkg/sql/logictest/testdata/planner_test/distsql_tighten_spans +++ b/pkg/sql/logictest/testdata/planner_test/distsql_tighten_spans @@ -47,6 +47,10 @@ CREATE TABLE decimal_t (a DECIMAL PRIMARY KEY) # Also split at the beginning of each index (0 for ASC, 100 for DESC) to # prevent interfering with previous indexes/tables. +# Prevent the merge queue from immediately discarding our splits. +statement ok +SET CLUSTER SETTING kv.range_merge.queue_enabled = false; + # p1 table (interleaved index) statement ok ALTER TABLE p1 SPLIT AT VALUES(2) diff --git a/pkg/sql/logictest/testdata/planner_test/distsql_union b/pkg/sql/logictest/testdata/planner_test/distsql_union index a23c6bb84c23..f3df60eb8a21 100644 --- a/pkg/sql/logictest/testdata/planner_test/distsql_union +++ b/pkg/sql/logictest/testdata/planner_test/distsql_union @@ -7,6 +7,10 @@ CREATE TABLE xyz ( z TEXT ) +# Prevent the merge queue from immediately discarding our splits. +statement ok +SET CLUSTER SETTING kv.range_merge.queue_enabled = false; + statement ok ALTER TABLE xyz SPLIT AT VALUES (2), (3), (4), (5) diff --git a/pkg/sql/logictest/testdata/planner_test/distsql_window b/pkg/sql/logictest/testdata/planner_test/distsql_window index daaaaafcad0c..409426b058f3 100644 --- a/pkg/sql/logictest/testdata/planner_test/distsql_window +++ b/pkg/sql/logictest/testdata/planner_test/distsql_window @@ -3,6 +3,10 @@ statement ok CREATE TABLE data (a INT, b INT, c FLOAT, d DECIMAL, PRIMARY KEY (a, b, c, d)) +# Prevent the merge queue from immediately discarding our splits. +statement ok +SET CLUSTER SETTING kv.range_merge.queue_enabled = false; + # Split into ten parts. statement ok ALTER TABLE data SPLIT AT SELECT i FROM generate_series(1, 9) AS g(i) diff --git a/pkg/sql/logictest/testdata/planner_test/subquery b/pkg/sql/logictest/testdata/planner_test/subquery index 8fb7b5004c27..a7612daca008 100644 --- a/pkg/sql/logictest/testdata/planner_test/subquery +++ b/pkg/sql/logictest/testdata/planner_test/subquery @@ -5,6 +5,10 @@ statement ok CREATE TABLE abc (a INT PRIMARY KEY, b INT, c INT) +# Prevent the merge queue from immediately discarding our splits. +statement ok +SET CLUSTER SETTING kv.range_merge.queue_enabled = false; + query TTT EXPLAIN ALTER TABLE abc SPLIT AT VALUES ((SELECT 42)) ---- diff --git a/pkg/sql/opt/exec/execbuilder/testdata/distsql_agg b/pkg/sql/opt/exec/execbuilder/testdata/distsql_agg index fe19243c666e..152b7e4559c1 100644 --- a/pkg/sql/opt/exec/execbuilder/testdata/distsql_agg +++ b/pkg/sql/opt/exec/execbuilder/testdata/distsql_agg @@ -3,6 +3,10 @@ statement ok CREATE TABLE data (a INT, b INT, c FLOAT, d DECIMAL, PRIMARY KEY (a, b, c, d)) +# Prevent the merge queue from immediately discarding our splits. +statement ok +SET CLUSTER SETTING kv.range_merge.queue_enabled = false; + # Split into ten parts. 
statement ok ALTER TABLE data SPLIT AT SELECT i FROM generate_series(1, 9) AS g(i) diff --git a/pkg/sql/opt/exec/execbuilder/testdata/distsql_distinct_on b/pkg/sql/opt/exec/execbuilder/testdata/distsql_distinct_on index f168ae975e74..d1d7ff876cf5 100644 --- a/pkg/sql/opt/exec/execbuilder/testdata/distsql_distinct_on +++ b/pkg/sql/opt/exec/execbuilder/testdata/distsql_distinct_on @@ -16,6 +16,10 @@ CREATE TABLE abc ( PRIMARY KEY (a, b, c) ) +# Prevent the merge queue from immediately discarding our splits. +statement ok +SET CLUSTER SETTING kv.range_merge.queue_enabled = false; + statement ok ALTER TABLE xyz SPLIT AT VALUES (2), (4), (6), (7) diff --git a/pkg/sql/opt/exec/execbuilder/testdata/distsql_indexjoin b/pkg/sql/opt/exec/execbuilder/testdata/distsql_indexjoin index 34734b5bf7be..676c73576eee 100644 --- a/pkg/sql/opt/exec/execbuilder/testdata/distsql_indexjoin +++ b/pkg/sql/opt/exec/execbuilder/testdata/distsql_indexjoin @@ -3,6 +3,10 @@ statement ok CREATE TABLE t (k INT PRIMARY KEY, v INT, w INT, INDEX v(v)) +# Prevent the merge queue from immediately discarding our splits. +statement ok +SET CLUSTER SETTING kv.range_merge.queue_enabled = false; + # Split the index into 5 parts, as if numbers were in the range 1 to 100. statement ok ALTER INDEX t@v SPLIT AT SELECT (i * 10)::int FROM generate_series(1, 4) AS g(i) diff --git a/pkg/sql/opt/exec/execbuilder/testdata/distsql_interleaved_join b/pkg/sql/opt/exec/execbuilder/testdata/distsql_interleaved_join index ec5ca4d2f5e3..39bcf5462070 100644 --- a/pkg/sql/opt/exec/execbuilder/testdata/distsql_interleaved_join +++ b/pkg/sql/opt/exec/execbuilder/testdata/distsql_interleaved_join @@ -83,6 +83,10 @@ INTERLEAVE IN PARENT child2 (pid1, cid2, cid3) # Split our ranges # #################### +# Prevent the merge queue from immediately discarding our splits. +statement ok +SET CLUSTER SETTING kv.range_merge.queue_enabled = false; + # Split at parent1 key into five parts. statement ok ALTER TABLE parent1 SPLIT AT SELECT i FROM generate_series(8, 32, 8) AS g(i) diff --git a/pkg/sql/opt/exec/execbuilder/testdata/distsql_join b/pkg/sql/opt/exec/execbuilder/testdata/distsql_join index 504e6d027d7c..0432b216ede3 100644 --- a/pkg/sql/opt/exec/execbuilder/testdata/distsql_join +++ b/pkg/sql/opt/exec/execbuilder/testdata/distsql_join @@ -3,6 +3,10 @@ statement ok CREATE TABLE data (a INT, b INT, c INT, d INT, PRIMARY KEY (a, b, c, d)) +# Prevent the merge queue from immediately discarding our splits. +statement ok +SET CLUSTER SETTING kv.range_merge.queue_enabled = false; + # Split into ten parts. statement ok ALTER TABLE data SPLIT AT SELECT i FROM generate_series(1, 9) AS g(i) diff --git a/pkg/sql/opt/exec/execbuilder/testdata/distsql_misc b/pkg/sql/opt/exec/execbuilder/testdata/distsql_misc index f3aa0a04a081..db20c1d89bac 100644 --- a/pkg/sql/opt/exec/execbuilder/testdata/distsql_misc +++ b/pkg/sql/opt/exec/execbuilder/testdata/distsql_misc @@ -60,6 +60,10 @@ subtest stats statement ok CREATE TABLE data (a INT, b INT, c FLOAT, d DECIMAL, PRIMARY KEY (a, b, c, d)) +# Prevent the merge queue from immediately discarding our splits. +statement ok +SET CLUSTER SETTING kv.range_merge.queue_enabled = false; + # Split into ten parts. 
statement ok ALTER TABLE data SPLIT AT SELECT i FROM generate_series(1, 9) AS g(i) diff --git a/pkg/sql/opt/exec/execbuilder/testdata/distsql_numtables b/pkg/sql/opt/exec/execbuilder/testdata/distsql_numtables index ff5556ed3538..1c3524ef25ab 100644 --- a/pkg/sql/opt/exec/execbuilder/testdata/distsql_numtables +++ b/pkg/sql/opt/exec/execbuilder/testdata/distsql_numtables @@ -13,6 +13,10 @@ INSERT INTO NumToSquare SELECT i, i*i FROM generate_series(1, 100) AS g(i) statement ok CREATE TABLE NumToStr (y INT PRIMARY KEY, str STRING) +# Prevent the merge queue from immediately discarding our splits. +statement ok +SET CLUSTER SETTING kv.range_merge.queue_enabled = false; + # Split into five parts. statement ok ALTER TABLE NumToStr SPLIT AT SELECT (i * 100 * 100 / 5)::int FROM generate_series(1, 4) AS g(i) diff --git a/pkg/sql/opt/exec/execbuilder/testdata/distsql_tighten_spans b/pkg/sql/opt/exec/execbuilder/testdata/distsql_tighten_spans index 88ad97c0c82c..2dbcbc5d1697 100644 --- a/pkg/sql/opt/exec/execbuilder/testdata/distsql_tighten_spans +++ b/pkg/sql/opt/exec/execbuilder/testdata/distsql_tighten_spans @@ -47,6 +47,10 @@ CREATE TABLE decimal_t (a DECIMAL PRIMARY KEY) # Also split at the beginning of each index (0 for ASC, 100 for DESC) to # prevent interfering with previous indexes/tables. +# Prevent the merge queue from immediately discarding our splits. +statement ok +SET CLUSTER SETTING kv.range_merge.queue_enabled = false; + # p1 table (interleaved index) statement ok ALTER TABLE p1 SPLIT AT VALUES(2) diff --git a/pkg/sql/opt/exec/execbuilder/testdata/distsql_union b/pkg/sql/opt/exec/execbuilder/testdata/distsql_union index 4b25b39530d1..4eb06a89fe3f 100644 --- a/pkg/sql/opt/exec/execbuilder/testdata/distsql_union +++ b/pkg/sql/opt/exec/execbuilder/testdata/distsql_union @@ -7,6 +7,10 @@ CREATE TABLE xyz ( z TEXT ) +# Prevent the merge queue from immediately discarding our splits. +statement ok +SET CLUSTER SETTING kv.range_merge.queue_enabled = false; + statement ok ALTER TABLE xyz SPLIT AT VALUES (2), (3), (4), (5) diff --git a/pkg/sql/opt/exec/execbuilder/testdata/lookup_join b/pkg/sql/opt/exec/execbuilder/testdata/lookup_join index 60ac2e8d68d3..003dc3212942 100644 --- a/pkg/sql/opt/exec/execbuilder/testdata/lookup_join +++ b/pkg/sql/opt/exec/execbuilder/testdata/lookup_join @@ -97,6 +97,10 @@ lookup-join · · (a, b, c, d, e, f) · statement ok CREATE TABLE data (a INT, b INT, c INT, d INT, PRIMARY KEY (a, b, c, d)) +# Prevent the merge queue from immediately discarding our splits. +statement ok +SET CLUSTER SETTING kv.range_merge.queue_enabled = false; + # Split into ten parts. statement ok ALTER TABLE data SPLIT AT SELECT i FROM generate_series(1, 9) AS g(i) diff --git a/pkg/sql/opt/exec/execbuilder/testdata/subquery b/pkg/sql/opt/exec/execbuilder/testdata/subquery index bfbddbf09e1b..dc395c1ac9c2 100644 --- a/pkg/sql/opt/exec/execbuilder/testdata/subquery +++ b/pkg/sql/opt/exec/execbuilder/testdata/subquery @@ -6,6 +6,10 @@ statement ok CREATE TABLE abc (a INT PRIMARY KEY, b INT, c INT) +# Prevent the merge queue from immediately discarding our splits. 
+statement ok +SET CLUSTER SETTING kv.range_merge.queue_enabled = false; + query TTT EXPLAIN ALTER TABLE abc SPLIT AT VALUES ((SELECT 42)) ---- diff --git a/pkg/sql/run_control_test.go b/pkg/sql/run_control_test.go index ae40e9f32317..08c69583bd10 100644 --- a/pkg/sql/run_control_test.go +++ b/pkg/sql/run_control_test.go @@ -233,6 +233,11 @@ func TestCancelDistSQLQuery(t *testing.T) { t.Fatal(err) } + // Prevent the merge queue from immediately discarding our splits. + if _, err := conn1.Exec("SET CLUSTER SETTING kv.range_merge.queue_enabled = false"); err != nil { + t.Fatal(err) + } + if _, err := conn1.Exec("ALTER TABLE nums SPLIT AT VALUES (50)"); err != nil { t.Fatal(err) } diff --git a/pkg/sql/scatter_test.go b/pkg/sql/scatter_test.go index 86383860ab24..561279b7f458 100644 --- a/pkg/sql/scatter_test.go +++ b/pkg/sql/scatter_test.go @@ -51,6 +51,9 @@ func TestScatterRandomizeLeases(t *testing.T) { r := sqlutils.MakeSQLRunner(tc.ServerConn(0)) + // Prevent the merge queue from immediately discarding our splits. + r.Exec(t, "SET CLUSTER SETTING kv.range_merge.queue_enabled = false") + // Introduce 99 splits to get 100 ranges. r.Exec(t, "ALTER TABLE test.t SPLIT AT (SELECT i*10 FROM generate_series(1, 99) AS g(i))") @@ -122,6 +125,8 @@ func TestScatterResponse(t *testing.T) { tableDesc := sqlbase.GetTableDescriptor(kvDB, "test", "t") r := sqlutils.MakeSQLRunner(sqlDB) + // Prevent the merge queue from immediately discarding our splits. + r.Exec(t, "SET CLUSTER SETTING kv.range_merge.queue_enabled = false") r.Exec(t, "ALTER TABLE test.t SPLIT AT (SELECT i*10 FROM generate_series(1, 99) AS g(i))") rows := r.Query(t, "ALTER TABLE test.t SCATTER") diff --git a/pkg/sql/trace_test.go b/pkg/sql/trace_test.go index 79b909c5963b..34529d517cb9 100644 --- a/pkg/sql/trace_test.go +++ b/pkg/sql/trace_test.go @@ -235,6 +235,9 @@ func TestTrace(t *testing.T) { if _, err := clusterDB.Exec(` CREATE DATABASE test; + -- Prevent the merge queue from immediately discarding our splits. + SET CLUSTER SETTING kv.range_merge.queue_enabled = false; + --- test.foo is a single range table. CREATE TABLE test.foo (id INT PRIMARY KEY); @@ -520,6 +523,8 @@ func TestKVTraceDistSQL(t *testing.T) { r.Exec(t, "CREATE DATABASE test") r.Exec(t, "CREATE TABLE test.a (a INT PRIMARY KEY, b INT)") r.Exec(t, "INSERT INTO test.a VALUES (1,1), (2,2)") + // Prevent the merge queue from immediately discarding our splits. + r.Exec(t, "SET CLUSTER SETTING kv.range_merge.queue_enabled = false") r.Exec(t, "ALTER TABLE a SPLIT AT VALUES(1)") r.Exec(t, "SET tracing = on,kv; SELECT count(*) FROM test.a; SET tracing = off") diff --git a/pkg/sql/txn_restart_test.go b/pkg/sql/txn_restart_test.go index 766ea78346bc..619f5f66d8cb 100644 --- a/pkg/sql/txn_restart_test.go +++ b/pkg/sql/txn_restart_test.go @@ -1535,6 +1535,8 @@ func TestDistSQLRetryableError(t *testing.T) { // We're going to split one of the tables, but node 4 is unaware of this. _, err := db.Exec(fmt.Sprintf(` + -- Prevent the merge queue from immediately discarding our splits. 
+ SET CLUSTER SETTING kv.range_merge.queue_enabled = false; ALTER TABLE "t" SPLIT AT VALUES (1), (2), (3); ALTER TABLE "t" EXPERIMENTAL_RELOCATE VALUES (ARRAY[%d], 1), (ARRAY[%d], 2), (ARRAY[%d], 3); `, diff --git a/pkg/storage/client_merge_test.go b/pkg/storage/client_merge_test.go index cdf838cd861f..889941f7381d 100644 --- a/pkg/storage/client_merge_test.go +++ b/pkg/storage/client_merge_test.go @@ -2091,6 +2091,7 @@ func TestMergeQueue(t *testing.T) { mtc.Start(t, 2) defer mtc.Stop() store := mtc.Store(0) + store.SetMergeQueueActive(true) split := func(t *testing.T, key roachpb.Key) { t.Helper() diff --git a/pkg/storage/client_test.go b/pkg/storage/client_test.go index c36f2e93a267..aa48dbc21d6a 100644 --- a/pkg/storage/client_test.go +++ b/pkg/storage/client_test.go @@ -627,6 +627,7 @@ func (m *multiTestContext) makeStoreConfig(i int) storage.StoreConfig { cfg.NodeDialer = m.nodeDialer cfg.Transport = m.transport cfg.Gossip = m.gossips[i] + cfg.TestingKnobs.DisableMergeQueue = true cfg.TestingKnobs.DisableSplitQueue = true cfg.TestingKnobs.ReplicateQueueAcceptsUnsplit = true return cfg diff --git a/pkg/storage/helpers_test.go b/pkg/storage/helpers_test.go index 49773625a951..e9b2f884b118 100644 --- a/pkg/storage/helpers_test.go +++ b/pkg/storage/helpers_test.go @@ -184,6 +184,11 @@ func (s *Store) SetSplitQueueActive(active bool) { s.setSplitQueueActive(active) } +// SetMergeQueueActive enables or disables the split queue. +func (s *Store) SetMergeQueueActive(active bool) { + s.setMergeQueueActive(active) +} + // SetRaftSnapshotQueueActive enables or disables the raft snapshot queue. func (s *Store) SetRaftSnapshotQueueActive(active bool) { s.setRaftSnapshotQueueActive(active) diff --git a/pkg/storage/merge_queue.go b/pkg/storage/merge_queue.go index 71f0542bfafd..8ab8b91ac83a 100644 --- a/pkg/storage/merge_queue.go +++ b/pkg/storage/merge_queue.go @@ -46,15 +46,11 @@ const ( // MergeQueueEnabled is a setting that controls whether the merge queue is // enabled. -var MergeQueueEnabled = func() *settings.BoolSetting { - s := settings.RegisterBoolSetting( - "kv.range_merge.queue_enabled", - "whether the automatic merge queue is enabled", - false, - ) - s.Hide() - return s -}() +var MergeQueueEnabled = settings.RegisterBoolSetting( + "kv.range_merge.queue_enabled", + "whether the automatic merge queue is enabled", + false, +) // MergeQueueInterval is a setting that controls how often the merge queue waits // between processing replicas. diff --git a/pkg/storage/replica_test.go b/pkg/storage/replica_test.go index 29d004d25ace..d54e15e2c6b0 100644 --- a/pkg/storage/replica_test.go +++ b/pkg/storage/replica_test.go @@ -198,8 +198,10 @@ func (tc *testContext) StartWithStoreConfig(t testing.TB, stopper *stop.Stopper, tc.store = NewStore(cfg, tc.engine, &roachpb.NodeDescriptor{NodeID: 1}) // Now that we have our actual store, monkey patch the factory used in cfg.DB. factory.setStore(tc.store) - // We created the store without a real KV client, so it can't perform splits. + // We created the store without a real KV client, so it can't perform splits + // or merges. 
tc.store.splitQueue.SetDisabled(true) + tc.store.mergeQueue.SetDisabled(true) if tc.repl == nil && tc.bootstrapMode == bootstrapRangeWithMetadata { if err := tc.store.BootstrapRange(nil, cfg.Settings.Version.ServerVersion); err != nil { diff --git a/pkg/storage/replicate_queue_test.go b/pkg/storage/replicate_queue_test.go index bc466debce8e..446aecd23d3d 100644 --- a/pkg/storage/replicate_queue_test.go +++ b/pkg/storage/replicate_queue_test.go @@ -183,6 +183,12 @@ func TestReplicateQueueDownReplicate(t *testing.T) { ServerArgs: base.TestServerArgs{ ScanMinIdleTime: time.Millisecond, ScanMaxIdleTime: time.Millisecond, + Knobs: base.TestingKnobs{ + Store: &storage.StoreTestingKnobs{ + // Prevent the merge queue from immediately discarding our splits. + DisableMergeQueue: true, + }, + }, }, }, ) diff --git a/pkg/workload/tpcc/ddls.go b/pkg/workload/tpcc/ddls.go index ab3d19730b73..80f867fa023e 100644 --- a/pkg/workload/tpcc/ddls.go +++ b/pkg/workload/tpcc/ddls.go @@ -163,6 +163,11 @@ const ( // NB: Since we always split at the same points (specific warehouse IDs and // item IDs), splitting is idempotent. func splitTables(db *gosql.DB, warehouses int) { + // Prevent the merge queue from immediately discarding our splits. + if _, err := db.Exec("SET CLUSTER SETTING kv.range_merge.queue_enabled = false"); err != nil { + panic(err) + } + var g errgroup.Group const concurrency = 64 sem := make(chan struct{}, concurrency) diff --git a/pkg/workload/workload.go b/pkg/workload/workload.go index 98fab6c6f964..d3600129c611 100644 --- a/pkg/workload/workload.go +++ b/pkg/workload/workload.go @@ -362,6 +362,11 @@ func Setup( // Split creates the range splits defined by the given table. func Split(ctx context.Context, db *gosql.DB, table Table, concurrency int) error { + // Prevent the merge queue from immediately discarding our splits. + if _, err := db.Exec(`SET CLUSTER SETTING kv.range_merge.queue_enabled = false`); err != nil { + return err + } + if table.Splits.NumBatches <= 0 { return nil } From 8cbaa0d216329396ca2f6f4ae33f1ca6d7fb98fe Mon Sep 17 00:00:00 2001 From: Nikhil Benesch Date: Wed, 22 Aug 2018 19:38:51 -0400 Subject: [PATCH 6/7] sql: prevent SPLIT AT when the merge queue is enabled Splitting while the merge queue is enabled is almost certainly a user mistake. Add a best-effort check to prevent users from splitting while the merge queue is enabled. Users can override the check and request a split anyway by twiddling a new session variable, experimental_force_split_at. We have plans to eventually make the splits created by SPLIT AT "sticky", so that the merge queue does not immediately merge them away, but not in time for 2.1. Release note (sql change): ALTER TABLE ... SPLIT AT now produces an error if executed while the merge queue is enabled, as the merge queue is likely to immediately discard any splits created by the statement. 
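A sketch of the resulting user-facing flow, assuming a client connected to a cluster where kv.range_merge.queue_enabled is true and a table t already exists (connection string and table name are illustrative):

    package main

    import (
    	gosql "database/sql"
    	"fmt"
    	"log"

    	_ "github.com/lib/pq"
    )

    func main() {
    	db, err := gosql.Open("postgres",
    		"postgresql://root@localhost:26257/test?sslmode=disable")
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer db.Close()

    	// With the merge queue enabled, the split is rejected with the new error:
    	// "splits would be immediately discarded by merge queue; ..."
    	if _, err := db.Exec(`ALTER TABLE t SPLIT AT VALUES (10)`); err != nil {
    		fmt.Println("rejected:", err)
    	}

    	// The check can be bypassed for this session only, without touching the
    	// cluster-wide merge queue setting.
    	if _, err := db.Exec(`SET experimental_force_split_at = true`); err != nil {
    		log.Fatal(err)
    	}
    	if _, err := db.Exec(`ALTER TABLE t SPLIT AT VALUES (10)`); err != nil {
    		log.Fatal(err)
    	}
    }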
--- pkg/sql/exec_util.go | 4 ++++ .../logictest/testdata/logic_test/pg_catalog | 3 +++ .../logictest/testdata/logic_test/show_source | 1 + .../logictest/testdata/planner_test/explain | 10 ++++----- pkg/sql/opt/exec/execbuilder/testdata/explain | 10 ++++----- pkg/sql/sessiondata/session_data.go | 3 +++ pkg/sql/split.go | 15 +++++++++++++ pkg/sql/split_test.go | 16 ++++++++++++++ pkg/sql/vars.go | 22 +++++++++++++++++++ 9 files changed, 74 insertions(+), 10 deletions(-) diff --git a/pkg/sql/exec_util.go b/pkg/sql/exec_util.go index f30211b95586..fe93fb3e95d1 100644 --- a/pkg/sql/exec_util.go +++ b/pkg/sql/exec_util.go @@ -1565,6 +1565,10 @@ func (m *sessionDataMutator) SetLookupJoinEnabled(val bool) { m.data.LookupJoinEnabled = val } +func (m *sessionDataMutator) SetForceSplitAt(val bool) { + m.data.ForceSplitAt = val +} + func (m *sessionDataMutator) SetZigzagJoinEnabled(val bool) { m.data.ZigzagJoinEnabled = val } diff --git a/pkg/sql/logictest/testdata/logic_test/pg_catalog b/pkg/sql/logictest/testdata/logic_test/pg_catalog index 4b317230b784..e0b8f6af795c 100644 --- a/pkg/sql/logictest/testdata/logic_test/pg_catalog +++ b/pkg/sql/logictest/testdata/logic_test/pg_catalog @@ -1310,6 +1310,7 @@ default_transaction_isolation serializable NULL NULL NULL default_transaction_read_only off NULL NULL NULL string distsql off NULL NULL NULL string experimental_force_lookup_join off NULL NULL NULL string +experimental_force_split_at off NULL NULL NULL string experimental_force_zigzag_join off NULL NULL NULL string experimental_serial_normalization rowid NULL NULL NULL string extra_float_digits 0 NULL NULL NULL string @@ -1346,6 +1347,7 @@ default_transaction_isolation serializable NULL user NULL serial default_transaction_read_only off NULL user NULL off off distsql off NULL user NULL off off experimental_force_lookup_join off NULL user NULL off off +experimental_force_split_at off NULL user NULL off off experimental_force_zigzag_join off NULL user NULL off off experimental_serial_normalization rowid NULL user NULL rowid rowid extra_float_digits 0 NULL user NULL 0 0 @@ -1382,6 +1384,7 @@ default_transaction_isolation NULL NULL NULL NULL NULL default_transaction_read_only NULL NULL NULL NULL NULL distsql NULL NULL NULL NULL NULL experimental_force_lookup_join NULL NULL NULL NULL NULL +experimental_force_split_at NULL NULL NULL NULL NULL experimental_force_zigzag_join NULL NULL NULL NULL NULL experimental_opt NULL NULL NULL NULL NULL experimental_serial_normalization NULL NULL NULL NULL NULL diff --git a/pkg/sql/logictest/testdata/logic_test/show_source b/pkg/sql/logictest/testdata/logic_test/show_source index 762c923cf063..2d886f7069fc 100644 --- a/pkg/sql/logictest/testdata/logic_test/show_source +++ b/pkg/sql/logictest/testdata/logic_test/show_source @@ -34,6 +34,7 @@ default_transaction_isolation serializable default_transaction_read_only off distsql off experimental_force_lookup_join off +experimental_force_split_at off experimental_force_zigzag_join off experimental_serial_normalization rowid extra_float_digits 0 diff --git a/pkg/sql/logictest/testdata/planner_test/explain b/pkg/sql/logictest/testdata/planner_test/explain index 393a3236f48a..d543bf5219c9 100644 --- a/pkg/sql/logictest/testdata/planner_test/explain +++ b/pkg/sql/logictest/testdata/planner_test/explain @@ -167,7 +167,7 @@ EXPLAIN SHOW DATABASE render · · └── filter · · └── values · · -· size 2 columns, 32 rows +· size 2 columns, 33 rows query TTT EXPLAIN SHOW TIME ZONE @@ -175,7 +175,7 @@ EXPLAIN SHOW TIME ZONE render · · └── 
filter · · └── values · · -· size 2 columns, 32 rows +· size 2 columns, 33 rows query TTT EXPLAIN SHOW DEFAULT_TRANSACTION_ISOLATION @@ -183,7 +183,7 @@ EXPLAIN SHOW DEFAULT_TRANSACTION_ISOLATION render · · └── filter · · └── values · · -· size 2 columns, 32 rows +· size 2 columns, 33 rows query TTT EXPLAIN SHOW TRANSACTION ISOLATION LEVEL @@ -191,7 +191,7 @@ EXPLAIN SHOW TRANSACTION ISOLATION LEVEL render · · └── filter · · └── values · · -· size 2 columns, 32 rows +· size 2 columns, 33 rows query TTT EXPLAIN SHOW TRANSACTION PRIORITY @@ -199,7 +199,7 @@ EXPLAIN SHOW TRANSACTION PRIORITY render · · └── filter · · └── values · · -· size 2 columns, 32 rows +· size 2 columns, 33 rows query TTT EXPLAIN SHOW COLUMNS FROM foo diff --git a/pkg/sql/opt/exec/execbuilder/testdata/explain b/pkg/sql/opt/exec/execbuilder/testdata/explain index e73d0372b9b9..95cc4f9480ee 100644 --- a/pkg/sql/opt/exec/execbuilder/testdata/explain +++ b/pkg/sql/opt/exec/execbuilder/testdata/explain @@ -160,7 +160,7 @@ EXPLAIN SHOW DATABASE render · · └── filter · · └── values · · -· size 2 columns, 32 rows +· size 2 columns, 33 rows query TTT EXPLAIN SHOW TIME ZONE @@ -168,7 +168,7 @@ EXPLAIN SHOW TIME ZONE render · · └── filter · · └── values · · -· size 2 columns, 32 rows +· size 2 columns, 33 rows query TTT EXPLAIN SHOW DEFAULT_TRANSACTION_ISOLATION @@ -176,7 +176,7 @@ EXPLAIN SHOW DEFAULT_TRANSACTION_ISOLATION render · · └── filter · · └── values · · -· size 2 columns, 32 rows +· size 2 columns, 33 rows query TTT EXPLAIN SHOW TRANSACTION ISOLATION LEVEL @@ -184,7 +184,7 @@ EXPLAIN SHOW TRANSACTION ISOLATION LEVEL render · · └── filter · · └── values · · -· size 2 columns, 32 rows +· size 2 columns, 33 rows query TTT EXPLAIN SHOW TRANSACTION PRIORITY @@ -192,7 +192,7 @@ EXPLAIN SHOW TRANSACTION PRIORITY render · · └── filter · · └── values · · -· size 2 columns, 32 rows +· size 2 columns, 33 rows query TTT EXPLAIN SHOW COLUMNS FROM foo diff --git a/pkg/sql/sessiondata/session_data.go b/pkg/sql/sessiondata/session_data.go index 7e31e7bf9ca1..a10949677ad9 100644 --- a/pkg/sql/sessiondata/session_data.go +++ b/pkg/sql/sessiondata/session_data.go @@ -46,6 +46,9 @@ type SessionData struct { // lookup join where the left side is scanned and index lookups are done on // the right side. Will emit a warning if a lookup join can't be planned. LookupJoinEnabled bool + // ForceSplitAt indicates whether checks to prevent incorrect usage of ALTER + // TABLE ... SPLIT AT should be skipped. + ForceSplitAt bool // OptimizerMode indicates whether to use the experimental optimizer for // query planning. OptimizerMode OptimizerMode diff --git a/pkg/sql/split.go b/pkg/sql/split.go index 815874deee93..a3f23f089751 100644 --- a/pkg/sql/split.go +++ b/pkg/sql/split.go @@ -24,11 +24,13 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sem/types" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" + "github.com/cockroachdb/cockroach/pkg/storage" ) type splitNode struct { optColumnsSlot + force bool tableDesc *sqlbase.TableDescriptor index *sqlbase.IndexDescriptor rows planNode @@ -76,6 +78,7 @@ func (p *planner) Split(ctx context.Context, n *tree.Split) (planNode, error) { } return &splitNode{ + force: p.SessionData().ForceSplitAt, tableDesc: tableDesc, index: index, rows: rows, @@ -98,6 +101,18 @@ type splitRun struct { lastSplitKey []byte } +func (n *splitNode) startExec(params runParams) error { + // This check is not intended to be foolproof. 
The setting could be outdated
+	// because of gossip inconsistency, or it could change halfway through the
+	// SPLIT AT's execution. It is, however, likely to prevent user error and
+	// confusion in the common case.
+	if !n.force && storage.MergeQueueEnabled.Get(&params.p.ExecCfg().Settings.SV) {
+		return errors.New("splits would be immediately discarded by merge queue; " +
+			"disable the merge queue first by running 'SET CLUSTER SETTING kv.range_merge.queue_enabled = false'")
+	}
+	return nil
+}
+
 func (n *splitNode) Next(params runParams) (bool, error) {
 	// TODO(radu): instead of performing the splits sequentially, accumulate all
 	// the split keys and then perform the splits in parallel (e.g. split at the
diff --git a/pkg/sql/split_test.go b/pkg/sql/split_test.go
index 761b8e45b246..f43543fc6e6d 100644
--- a/pkg/sql/split_test.go
+++ b/pkg/sql/split_test.go
@@ -22,6 +22,7 @@ import (
 	"github.com/cockroachdb/cockroach/pkg/roachpb"
 	"github.com/cockroachdb/cockroach/pkg/server"
 	"github.com/cockroachdb/cockroach/pkg/sql/tests"
+	"github.com/cockroachdb/cockroach/pkg/testutils"
 	"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
 	"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
 	"github.com/cockroachdb/cockroach/pkg/util/leaktest"
@@ -45,6 +46,21 @@ func TestSplitAt(t *testing.T) {
 	)`)
 	r.Exec(t, `CREATE TABLE d.i (k INT PRIMARY KEY)`)
 
+	// Verify that ALTER TABLE ... SPLIT AT is rejected when the merge queue is
+	// enabled.
+	r.Exec(t, "SET CLUSTER SETTING kv.range_merge.queue_enabled = true")
+	expErr := "splits would be immediately discarded by merge queue"
+	if _, err := db.Exec("ALTER TABLE d.t SPLIT AT VALUES (1, 'a')"); !testutils.IsError(err, expErr) {
+		t.Fatalf("expected %q error but got %v", expErr, err)
+	}
+
+	// Verify that we can override the merge queue check with a session variable.
+	r.Exec(t, "SET experimental_force_split_at = true")
+	r.Exec(t, "ALTER TABLE d.t SPLIT AT VALUES (1, 'a')")
+
+	// Prevent the merge queue from immediately discarding our splits.
+	r.Exec(t, "SET CLUSTER SETTING kv.range_merge.queue_enabled = false")
+
 	tests := []struct {
 		in    string
 		error string
diff --git a/pkg/sql/vars.go b/pkg/sql/vars.go
index 35c9356c99e4..4e733dd97ee8 100644
--- a/pkg/sql/vars.go
+++ b/pkg/sql/vars.go
@@ -278,6 +278,28 @@ var varGen = map[string]sessionVar{
 		},
 	},
 
+	// CockroachDB extension.
+	`experimental_force_split_at`: {
+		Set: func(
+			_ context.Context, m *sessionDataMutator,
+			evalCtx *extendedEvalContext, values []tree.TypedExpr,
+		) error {
+			s, err := getSingleBool("experimental_force_split_at", evalCtx, values)
+			if err != nil {
+				return err
+			}
+			m.SetForceSplitAt(bool(*s))
+			return nil
+		},
+		Get: func(evalCtx *extendedEvalContext) string {
+			return formatBoolAsPostgresSetting(evalCtx.SessionData.ForceSplitAt)
+		},
+		Reset: func(m *sessionDataMutator) error {
+			m.SetForceSplitAt(false)
+			return nil
+		},
+	},
+
 	// CockroachDB extension.
 	`experimental_force_zigzag_join`: {
 		Set: func(

From 98ca1d0fe35431a26c6cbfbcf9d7b581d1373c1f Mon Sep 17 00:00:00 2001
From: Nikhil Benesch
Date: Wed, 22 Aug 2018 19:57:52 -0400
Subject: [PATCH 7/7] storage: enable the merge queue by default

Start smoking out bugs in range merges by enabling them by default.
This commit will not be backported to v2.1 until we've gained enough
confidence in their stability on master.
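
As a usage sketch (not part of this patch), operators who rely on manually
created splits can opt back out of automatic merging with the cluster setting
whose default changes in the diff below:

    SET CLUSTER SETTING kv.range_merge.queue_enabled = false;  -- disable automatic merges
    SET CLUSTER SETTING kv.range_merge.queue_enabled = true;   -- return to the new default
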
Release note: None

---
 docs/generated/settings/settings.html | 2 +-
 pkg/storage/merge_queue.go            | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/generated/settings/settings.html b/docs/generated/settings/settings.html
index 10fa5d7b8d44..ec32f20fbdca 100644
--- a/docs/generated/settings/settings.html
+++ b/docs/generated/settings/settings.html
@@ -31,7 +31,7 @@
 <tr><td><code>kv.raft_log.synchronize</code></td><td>boolean</td><td>true</td><td>set to true to synchronize on Raft log writes to persistent storage ('false' risks data loss)</td></tr>
 <tr><td><code>kv.range.backpressure_range_size_multiplier</code></td><td>float</td><td>2</td><td>multiple of range_max_bytes that a range is allowed to grow to without splitting before writes to that range are blocked, or 0 to disable</td></tr>
 <tr><td><code>kv.range_descriptor_cache.size</code></td><td>integer</td><td>1000000</td><td>maximum number of entries in the range descriptor and leaseholder caches</td></tr>
-<tr><td><code>kv.range_merge.queue_enabled</code></td><td>boolean</td><td>false</td><td>whether the automatic merge queue is enabled</td></tr>
+<tr><td><code>kv.range_merge.queue_enabled</code></td><td>boolean</td><td>true</td><td>whether the automatic merge queue is enabled</td></tr>
 <tr><td><code>kv.rangefeed.enabled</code></td><td>boolean</td><td>false</td><td>if set, rangefeed registration is enabled</td></tr>
 <tr><td><code>kv.snapshot_rebalance.max_rate</code></td><td>byte size</td><td>2.0 MiB</td><td>the rate limit (bytes/sec) to use for rebalance snapshots</td></tr>
 <tr><td><code>kv.snapshot_recovery.max_rate</code></td><td>byte size</td><td>8.0 MiB</td><td>the rate limit (bytes/sec) to use for recovery snapshots</td></tr>
diff --git a/pkg/storage/merge_queue.go b/pkg/storage/merge_queue.go
index 8ab8b91ac83a..ef97073ebe5a 100644
--- a/pkg/storage/merge_queue.go
+++ b/pkg/storage/merge_queue.go
@@ -49,7 +49,7 @@ const (
 var MergeQueueEnabled = settings.RegisterBoolSetting(
 	"kv.range_merge.queue_enabled",
 	"whether the automatic merge queue is enabled",
-	false,
+	true,
 )
 
 // MergeQueueInterval is a setting that controls how often the merge queue waits