storage: enable merge queue by default #28961

Merged · 7 commits · Aug 27, 2018
1 change: 1 addition & 0 deletions docs/generated/settings/settings.html
@@ -31,6 +31,7 @@
<tr><td><code>kv.raft_log.synchronize</code></td><td>boolean</td><td><code>true</code></td><td>set to true to synchronize on Raft log writes to persistent storage ('false' risks data loss)</td></tr>
<tr><td><code>kv.range.backpressure_range_size_multiplier</code></td><td>float</td><td><code>2</code></td><td>multiple of range_max_bytes that a range is allowed to grow to without splitting before writes to that range are blocked, or 0 to disable</td></tr>
<tr><td><code>kv.range_descriptor_cache.size</code></td><td>integer</td><td><code>1000000</code></td><td>maximum number of entries in the range descriptor and leaseholder caches</td></tr>
<tr><td><code>kv.range_merge.queue_enabled</code></td><td>boolean</td><td><code>true</code></td><td>whether the automatic merge queue is enabled</td></tr>
<tr><td><code>kv.rangefeed.enabled</code></td><td>boolean</td><td><code>false</code></td><td>if set, rangefeed registration is enabled</td></tr>
<tr><td><code>kv.snapshot_rebalance.max_rate</code></td><td>byte size</td><td><code>2.0 MiB</code></td><td>the rate limit (bytes/sec) to use for rebalance snapshots</td></tr>
<tr><td><code>kv.snapshot_recovery.max_rate</code></td><td>byte size</td><td><code>8.0 MiB</code></td><td>the rate limit (bytes/sec) to use for recovery snapshots</td></tr>
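With the merge queue now on by default, tests that rely on manual splits disable it up front; that is the pattern added throughout the test diffs below. A minimal SQL sketch of that pattern, using the setting documented above (the table name and split values here are placeholders):

```sql
-- Keep the automatic merge queue from undoing manual splits.
SET CLUSTER SETTING kv.range_merge.queue_enabled = false;

-- Create the splits the test depends on (placeholder table and values).
ALTER TABLE t SPLIT AT VALUES (1), (2), (3);

-- Restore the default once the splits are no longer needed.
SET CLUSTER SETTING kv.range_merge.queue_enabled = true;
```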
2 changes: 2 additions & 0 deletions pkg/ccl/storageccl/export_test.go
@@ -167,6 +167,8 @@ func TestExportCmd(t *testing.T) {

var res5 ExportAndSlurpResult
t.Run("ts5", func(t *testing.T) {
// Prevent the merge queue from immediately discarding our splits.
sqlDB.Exec(t, `SET CLUSTER SETTING kv.range_merge.queue_enabled = false`)
sqlDB.Exec(t, `ALTER TABLE mvcclatest.export SPLIT AT VALUES (2)`)
res5 = exportAndSlurp(t, hlc.Timestamp{})
expect(t, res5, 2, 2, 2, 7)
2 changes: 2 additions & 0 deletions pkg/cmd/roachtest/election.go
@@ -36,6 +36,8 @@ func registerElectionAfterRestart(r *registry) {
c.Run(ctx, c.Node(1), `./cockroach sql --insecure -e "
CREATE DATABASE IF NOT EXISTS test;
CREATE TABLE test.kv (k INT PRIMARY KEY, v INT);
-- Prevent the merge queue from immediately discarding our splits.
SET CLUSTER SETTING kv.range_merge.queue_enabled = false;
ALTER TABLE test.kv SPLIT AT SELECT generate_series(0, 10000, 100)"`)

start := timeutil.Now()
17 changes: 15 additions & 2 deletions pkg/sql/distsql_physical_planner_test.go
@@ -71,6 +71,11 @@ func SplitTable(
t.Fatal(err)
}

// Prevent the merge queue from immediately discarding our split.
if _, err := tc.ServerConn(0).Exec("SET CLUSTER SETTING kv.range_merge.queue_enabled = false"); err != nil {
t.Fatal(err)
}

_, rightRange, err := tc.Server(0).SplitRange(pik)
if err != nil {
t.Fatal(err)
@@ -88,8 +93,8 @@
}

// TestPlanningDuringSplits verifies that table reader planning (resolving
-// spans) tolerates concurrent splits.
-func TestPlanningDuringSplits(t *testing.T) {
+// spans) tolerates concurrent splits and merges.
+func TestPlanningDuringSplitsAndMerges(t *testing.T) {
defer leaktest.AfterTest(t)()

const n = 100
@@ -316,6 +321,8 @@ func TestDistSQLRangeCachesIntegrationTest(t *testing.T) {

// We're going to split one of the tables, but node 4 is unaware of this.
_, err = db0.Exec(fmt.Sprintf(`
-- Prevent the merge queue from immediately discarding our splits.
SET CLUSTER SETTING kv.range_merge.queue_enabled = false;
ALTER TABLE "right" SPLIT AT VALUES (1), (2), (3);
ALTER TABLE "right" EXPERIMENTAL_RELOCATE VALUES (ARRAY[%d], 1), (ARRAY[%d], 2), (ARRAY[%d], 3);
`,
@@ -397,6 +404,9 @@ func TestDistSQLDeadHosts(t *testing.T) {

r.Exec(t, "CREATE TABLE t (x INT PRIMARY KEY, xsquared INT)")

// Prevent the merge queue from immediately discarding our splits.
r.Exec(t, "SET CLUSTER SETTING kv.range_merge.queue_enabled = false")

for i := 0; i < numNodes; i++ {
r.Exec(t, fmt.Sprintf("ALTER TABLE t SPLIT AT VALUES (%d)", n*i/5))
}
@@ -484,6 +494,9 @@ func TestDistSQLDrainingHosts(t *testing.T) {
r := sqlutils.MakeSQLRunner(tc.ServerConn(0))
r.DB.SetMaxOpenConns(1)

// Prevent the merge queue from immediately discarding our splits.
r.Exec(t, "SET CLUSTER SETTING kv.range_merge.queue_enabled = false")

r.Exec(t, "SET DISTSQL = ON")
// Force the query to be distributed.
r.Exec(
2 changes: 2 additions & 0 deletions pkg/sql/distsqlrun/cluster_test.go
@@ -563,6 +563,8 @@ func TestDistSQLReadsFillGatewayID(t *testing.T) {
sqlutils.ToRowFn(sqlutils.RowIdxFn))

if _, err := db.Exec(`
-- Prevent the merge queue from immediately discarding our splits.
SET CLUSTER SETTING kv.range_merge.queue_enabled = false;
ALTER TABLE t SPLIT AT VALUES (1), (2), (3);
ALTER TABLE t EXPERIMENTAL_RELOCATE VALUES (ARRAY[2], 1), (ARRAY[1], 2), (ARRAY[3], 3);
`); err != nil {
7 changes: 7 additions & 0 deletions pkg/sql/distsqlrun/tablereader_test.go
@@ -197,6 +197,8 @@ func TestMisplannedRangesMetadata(t *testing.T) {
sqlutils.ToRowFn(sqlutils.RowIdxFn))

_, err := db.Exec(`
-- Prevent the merge queue from immediately discarding our splits.
SET CLUSTER SETTING kv.range_merge.queue_enabled = false;
ALTER TABLE t SPLIT AT VALUES (1), (2), (3);
ALTER TABLE t EXPERIMENTAL_RELOCATE VALUES (ARRAY[2], 1), (ARRAY[1], 2), (ARRAY[3], 3);
`)
@@ -307,6 +309,11 @@ func TestLimitScans(t *testing.T) {
100, /* numRows */
sqlutils.ToRowFn(sqlutils.RowIdxFn))

// Prevent the merge queue from immediately discarding our splits.
if _, err := sqlDB.Exec("SET CLUSTER SETTING kv.range_merge.queue_enabled = false"); err != nil {
t.Fatal(err)
}

if _, err := sqlDB.Exec("ALTER TABLE t SPLIT AT VALUES (5)"); err != nil {
t.Fatal(err)
}
4 changes: 4 additions & 0 deletions pkg/sql/exec_util.go
@@ -1565,6 +1565,10 @@ func (m *sessionDataMutator) SetLookupJoinEnabled(val bool) {
m.data.LookupJoinEnabled = val
}

func (m *sessionDataMutator) SetForceSplitAt(val bool) {
m.data.ForceSplitAt = val
}

func (m *sessionDataMutator) SetZigzagJoinEnabled(val bool) {
m.data.ZigzagJoinEnabled = val
}
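The SetForceSplitAt mutator above wires up a session variable that appears as experimental_force_split_at in the pg_catalog and show_source diffs further down. A hedged sketch of the intended usage, inferred only from the name (this diff does not show the semantics): let ALTER TABLE ... SPLIT AT proceed without separately disabling the merge queue.

```sql
-- Assumed usage of the new session setting; behavior inferred from its name.
SET experimental_force_split_at = true;

-- Check the current value (the pg_catalog diff shows it defaulting to 'off').
SHOW experimental_force_split_at;

-- With the setting on, manual splits are presumably honored even though
-- kv.range_merge.queue_enabled now defaults to true.
ALTER TABLE t SPLIT AT VALUES (10), (20);
```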
4 changes: 4 additions & 0 deletions pkg/sql/logictest/testdata/logic_test/distsql_agg
@@ -3,6 +3,10 @@
statement ok
CREATE TABLE data (a INT, b INT, c FLOAT, d DECIMAL, PRIMARY KEY (a, b, c, d))

# Prevent the merge queue from immediately discarding our splits.
statement ok
SET CLUSTER SETTING kv.range_merge.queue_enabled = false;

# Split into ten parts.
statement ok
ALTER TABLE data SPLIT AT SELECT i FROM generate_series(1, 9) AS g(i)
4 changes: 4 additions & 0 deletions pkg/sql/logictest/testdata/logic_test/distsql_distinct_on
@@ -34,6 +34,10 @@ INSERT INTO abc VALUES
('2', '3', '4'),
('3', '4', '5')

# Prevent the merge queue from immediately discarding our splits.
statement ok
SET CLUSTER SETTING kv.range_merge.queue_enabled = false;

statement ok
ALTER TABLE xyz SPLIT AT VALUES (2), (4), (6), (7)

@@ -168,6 +168,10 @@
# Split our ranges #
####################

# Prevent the merge queue from immediately discarding our splits.
statement ok
SET CLUSTER SETTING kv.range_merge.queue_enabled = false;

# Split at parent1 key into five parts.
statement ok
ALTER TABLE parent1 SPLIT AT SELECT i FROM generate_series(8, 32, 8) AS g(i)
8 changes: 8 additions & 0 deletions pkg/sql/logictest/testdata/logic_test/distsql_lookup_join
@@ -9,6 +9,10 @@ SET experimental_force_lookup_join = true;
statement ok
CREATE TABLE data (a INT, b INT, c INT, d INT, PRIMARY KEY (a, b, c, d))

# Prevent the merge queue from immediately discarding our splits.
statement ok
SET CLUSTER SETTING kv.range_merge.queue_enabled = false;

# Split into ten parts.
statement ok
ALTER TABLE data SPLIT AT SELECT i FROM generate_series(1, 9) AS g(i)
@@ -227,6 +231,10 @@ true
statement ok
CREATE TABLE multiples (a INT, b INT, c INT, d INT, PRIMARY KEY (a, b), INDEX bc (b) STORING (c))

# Prevent the merge queue from immediately discarding our splits.
statement ok
SET CLUSTER SETTING kv.range_merge.queue_enabled = false;

# Split into ten parts.
statement ok
ALTER TABLE multiples SPLIT AT SELECT i FROM generate_series(1, 9) AS g(i)
4 changes: 4 additions & 0 deletions pkg/sql/logictest/testdata/logic_test/distsql_numtables
@@ -13,6 +13,10 @@ INSERT INTO NumToSquare SELECT i, i*i FROM generate_series(1, 100) AS g(i)
statement ok
CREATE TABLE NumToStr (y INT PRIMARY KEY, str STRING)

# Prevent the merge queue from immediately discarding our splits.
statement ok
SET CLUSTER SETTING kv.range_merge.queue_enabled = false;

# Split into five parts.
statement ok
ALTER TABLE NumToStr SPLIT AT SELECT (i * 100 * 100 / 5)::int FROM generate_series(1, 4) AS g(i)
4 changes: 4 additions & 0 deletions pkg/sql/logictest/testdata/logic_test/distsql_stats
@@ -3,6 +3,10 @@
statement ok
CREATE TABLE data (a INT, b INT, c FLOAT, d DECIMAL, PRIMARY KEY (a, b, c, d))

# Prevent the merge queue from immediately discarding our splits.
statement ok
SET CLUSTER SETTING kv.range_merge.queue_enabled = false;

# Split into ten parts.
statement ok
ALTER TABLE data SPLIT AT SELECT i FROM generate_series(1, 9) AS g(i)
4 changes: 4 additions & 0 deletions pkg/sql/logictest/testdata/logic_test/distsql_tighten_spans
@@ -99,6 +99,10 @@ INSERT INTO decimal_t VALUES
# Also split at the beginning of each index (0 for ASC, 100 for DESC) to
# prevent interfering with previous indexes/tables.

# Prevent the merge queue from immediately discarding our splits.
statement ok
SET CLUSTER SETTING kv.range_merge.queue_enabled = false;

# p1 table (interleaved index)
statement ok
ALTER TABLE p1 SPLIT AT VALUES(2)
4 changes: 4 additions & 0 deletions pkg/sql/logictest/testdata/logic_test/distsql_union
@@ -16,6 +16,10 @@ INSERT INTO xyz VALUES
(4, 2, 'b'),
(5, 2, 'c')

# Prevent the merge queue from immediately discarding our splits.
statement ok
SET CLUSTER SETTING kv.range_merge.queue_enabled = false;

statement ok
ALTER TABLE xyz SPLIT AT VALUES (2), (3), (4), (5)

4 changes: 4 additions & 0 deletions pkg/sql/logictest/testdata/logic_test/explain_analyze
@@ -49,6 +49,10 @@ CREATE TABLE kw (k INT PRIMARY KEY, w INT)
statement ok
INSERT INTO kw SELECT i, i FROM generate_series(1,5) AS g(i)

# Prevent the merge queue from immediately discarding our splits.
statement ok
SET CLUSTER SETTING kv.range_merge.queue_enabled = false;

# Split into 5 parts, each row from each table goes to one node.
statement ok
ALTER TABLE kv SPLIT AT SELECT i FROM generate_series(1,5) AS g(i)
3 changes: 3 additions & 0 deletions pkg/sql/logictest/testdata/logic_test/pg_catalog
@@ -1310,6 +1310,7 @@ default_transaction_isolation serializable NULL NULL NULL
default_transaction_read_only off NULL NULL NULL string
distsql off NULL NULL NULL string
experimental_force_lookup_join off NULL NULL NULL string
experimental_force_split_at off NULL NULL NULL string
experimental_force_zigzag_join off NULL NULL NULL string
experimental_serial_normalization rowid NULL NULL NULL string
extra_float_digits 0 NULL NULL NULL string
@@ -1346,6 +1347,7 @@ default_transaction_isolation serializable NULL user NULL serial
default_transaction_read_only off NULL user NULL off off
distsql off NULL user NULL off off
experimental_force_lookup_join off NULL user NULL off off
experimental_force_split_at off NULL user NULL off off
experimental_force_zigzag_join off NULL user NULL off off
experimental_serial_normalization rowid NULL user NULL rowid rowid
extra_float_digits 0 NULL user NULL 0 0
@@ -1382,6 +1384,7 @@ default_transaction_isolation NULL NULL NULL NULL NULL
default_transaction_read_only NULL NULL NULL NULL NULL
distsql NULL NULL NULL NULL NULL
experimental_force_lookup_join NULL NULL NULL NULL NULL
experimental_force_split_at NULL NULL NULL NULL NULL
experimental_force_zigzag_join NULL NULL NULL NULL NULL
experimental_opt NULL NULL NULL NULL NULL
experimental_serial_normalization NULL NULL NULL NULL NULL
4 changes: 4 additions & 0 deletions pkg/sql/logictest/testdata/logic_test/ranges
@@ -9,6 +9,10 @@ SHOW EXPERIMENTAL_RANGES FROM TABLE t
start_key end_key range_id replicas lease_holder
NULL NULL 1 {1} 1

# Prevent the merge queue from immediately discarding our splits.
statement ok
SET CLUSTER SETTING kv.range_merge.queue_enabled = false;

statement ok
ALTER TABLE t SPLIT AT VALUES (1), (10)

@@ -35,6 +35,10 @@ INSERT INTO t VALUES
(12, 0, 88, 0),
(13, 0, 13, 0)

# Prevent the merge queue from immediately discarding our splits.
statement ok
SET CLUSTER SETTING kv.range_merge.queue_enabled = false;

# Split the table across multiple ranges.
statement ok
ALTER TABLE t SPLIT AT VALUES (2)
1 change: 1 addition & 0 deletions pkg/sql/logictest/testdata/logic_test/show_source
@@ -34,6 +34,7 @@ default_transaction_isolation serializable
default_transaction_read_only off
distsql off
experimental_force_lookup_join off
experimental_force_split_at off
experimental_force_zigzag_join off
experimental_serial_normalization rowid
extra_float_digits 0
4 changes: 4 additions & 0 deletions pkg/sql/logictest/testdata/logic_test/subquery
@@ -157,6 +157,10 @@ CREATE TABLE abc (a INT PRIMARY KEY, b INT, c INT)
statement ok
INSERT INTO abc VALUES (1, 2, 3), (4, 5, 6)

# Prevent the merge queue from immediately discarding our splits.
statement ok
SET CLUSTER SETTING kv.range_merge.queue_enabled = false;

statement ok
ALTER TABLE abc SPLIT AT VALUES ((SELECT 1))

4 changes: 4 additions & 0 deletions pkg/sql/logictest/testdata/planner_test/distsql_agg
@@ -3,6 +3,10 @@
statement ok
CREATE TABLE data (a INT, b INT, c FLOAT, d DECIMAL, PRIMARY KEY (a, b, c, d))

# Prevent the merge queue from immediately discarding our splits.
statement ok
SET CLUSTER SETTING kv.range_merge.queue_enabled = false;

# Split into ten parts.
statement ok
ALTER TABLE data SPLIT AT SELECT i FROM generate_series(1, 9) AS g(i)
4 changes: 4 additions & 0 deletions pkg/sql/logictest/testdata/planner_test/distsql_distinct_on
@@ -16,6 +16,10 @@ CREATE TABLE abc (
PRIMARY KEY (a, b, c)
)

# Prevent the merge queue from immediately discarding our splits.
statement ok
SET CLUSTER SETTING kv.range_merge.queue_enabled = false;

statement ok
ALTER TABLE xyz SPLIT AT VALUES (2), (4), (6), (7)

4 changes: 4 additions & 0 deletions pkg/sql/logictest/testdata/planner_test/distsql_indexjoin
@@ -3,6 +3,10 @@
statement ok
CREATE TABLE t (k INT PRIMARY KEY, v INT, w INT, INDEX v(v))

# Prevent the merge queue from immediately discarding our splits.
statement ok
SET CLUSTER SETTING kv.range_merge.queue_enabled = false;

# Split the index into 5 parts, as if numbers were in the range 1 to 100.
statement ok
ALTER INDEX t@v SPLIT AT SELECT (i * 10)::int FROM generate_series(1, 4) AS g(i)
@@ -83,6 +83,10 @@ INTERLEAVE IN PARENT child2 (pid1, cid2, cid3)
# Split our ranges #
####################

# Prevent the merge queue from immediately discarding our splits.
statement ok
SET CLUSTER SETTING kv.range_merge.queue_enabled = false;

# Split at parent1 key into five parts.
statement ok
ALTER TABLE parent1 SPLIT AT SELECT i FROM generate_series(8, 32, 8) AS g(i)
4 changes: 4 additions & 0 deletions pkg/sql/logictest/testdata/planner_test/distsql_join
@@ -3,6 +3,10 @@
statement ok
CREATE TABLE data (a INT, b INT, c INT, d INT, PRIMARY KEY (a, b, c, d))

# Prevent the merge queue from immediately discarding our splits.
statement ok
SET CLUSTER SETTING kv.range_merge.queue_enabled = false;

# Split into ten parts.
statement ok
ALTER TABLE data SPLIT AT SELECT i FROM generate_series(1, 9) AS g(i)
4 changes: 4 additions & 0 deletions pkg/sql/logictest/testdata/planner_test/distsql_lookup_join
@@ -9,6 +9,10 @@ SET experimental_force_lookup_join = true;
statement ok
CREATE TABLE data (a INT, b INT, c INT, d INT, PRIMARY KEY (a, b, c, d))

# Prevent the merge queue from immediately discarding our splits.
statement ok
SET CLUSTER SETTING kv.range_merge.queue_enabled = false;

# Split into ten parts.
statement ok
ALTER TABLE data SPLIT AT SELECT i FROM generate_series(1, 9) AS g(i)
4 changes: 4 additions & 0 deletions pkg/sql/logictest/testdata/planner_test/distsql_misc
@@ -57,6 +57,10 @@ subtest stats
statement ok
CREATE TABLE data (a INT, b INT, c FLOAT, d DECIMAL, PRIMARY KEY (a, b, c, d))

# Prevent the merge queue from immediately discarding our splits.
statement ok
SET CLUSTER SETTING kv.range_merge.queue_enabled = false;

# Split into ten parts.
statement ok
ALTER TABLE data SPLIT AT SELECT i FROM generate_series(1, 9) AS g(i)
4 changes: 4 additions & 0 deletions pkg/sql/logictest/testdata/planner_test/distsql_numtables
@@ -13,6 +13,10 @@ INSERT INTO NumToSquare SELECT i, i*i FROM generate_series(1, 100) AS g(i)
statement ok
CREATE TABLE NumToStr (y INT PRIMARY KEY, str STRING)

# Prevent the merge queue from immediately discarding our splits.
statement ok
SET CLUSTER SETTING kv.range_merge.queue_enabled = false;

# Split into five parts.
statement ok
ALTER TABLE NumToStr SPLIT AT SELECT (i * 100 * 100 / 5)::int FROM generate_series(1, 4) AS g(i)
4 changes: 4 additions & 0 deletions pkg/sql/logictest/testdata/planner_test/distsql_sort
@@ -3,6 +3,10 @@
statement ok
CREATE TABLE data (a INT, b INT, c INT, d INT, PRIMARY KEY (a, b, c, d))

# Prevent the merge queue from immediately discarding our splits.
statement ok
SET CLUSTER SETTING kv.range_merge.queue_enabled = false;

# Split into ten parts.
statement ok
ALTER TABLE data SPLIT AT SELECT i FROM generate_series(1, 9) AS g(i)