Skip to content

Commit

Permalink
Merge #46057 #46174
Browse files Browse the repository at this point in the history
46057: storage/metamorphic: Run pebble with different options r=itsbilal a=itsbilal

This change updates the MVCC metamorphic tests to run Pebble
with different, random options, as well as one configuration
that seems to uncover many issues (the one with low
TargetFileSizes on all levels).

Also fixes batchCommitOp to actually commit the batch
passed in.

Release note: None

Release justification: Safe for this release because it only
updates testing code.

46174: sql: remove experimental prefix from opt-driven FK settings r=RaduBerinde a=RaduBerinde

Release note (sql change): renamed `experimental_optimizer_foreign_keys` session
setting and `sql.defaults.optimizer_foreign_keys.enabled` cluster setting to
remove the experimental prefix.

Release justification: bug fixes and low-risk updates to new functionality.

Co-authored-by: Bilal Akhtar <[email protected]>
Co-authored-by: Radu Berinde <[email protected]>
  • Loading branch information
3 people committed Mar 17, 2020
3 parents c26cc79 + 2f3353f + b4ec5ce commit c4426dd
Show file tree
Hide file tree
Showing 18 changed files with 100 additions and 32 deletions.
2 changes: 1 addition & 1 deletion pkg/sql/copy_in_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -457,7 +457,7 @@ func TestCopyFKCheck(t *testing.T) {
a INT PRIMARY KEY,
p INT REFERENCES p(p)
);
SET experimental_optimizer_foreign_keys = true;
SET optimizer_foreign_keys = true;
`)

txn, err := db.Begin()
Expand Down
4 changes: 2 additions & 2 deletions pkg/sql/exec_util.go
Original file line number Diff line number Diff line change
Expand Up @@ -169,8 +169,8 @@ var zigzagJoinClusterMode = settings.RegisterBoolSetting(
)

var optDrivenFKClusterMode = settings.RegisterBoolSetting(
"sql.defaults.experimental_optimizer_foreign_keys.enabled",
"default value for experimental_optimizer_foreign_keys session setting; enables optimizer-driven foreign key checks by default",
"sql.defaults.optimizer_foreign_keys.enabled",
"default value for optimizer_foreign_keys session setting; enables optimizer-driven foreign key checks by default",
true,
)

Expand Down
2 changes: 1 addition & 1 deletion pkg/sql/logictest/testdata/logic_test/cascade
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
# The tests in this file target the legacy FK paths.
statement ok
SET experimental_optimizer_foreign_keys = false
SET optimizer_foreign_keys = false

subtest AllCascadingActions
### A test of all cascading actions in their most basic form.
Expand Down
2 changes: 1 addition & 1 deletion pkg/sql/logictest/testdata/logic_test/cascade_opt
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
# The tests in this file target the new optimizer-driven FK paths (with
# fall back on the legacy paths for unsupported cases).
statement ok
SET experimental_optimizer_foreign_keys = true
SET optimizer_foreign_keys = true

subtest AllCascadingActions
### A test of all cascading actions in their most basic form.
Expand Down
2 changes: 1 addition & 1 deletion pkg/sql/logictest/testdata/logic_test/fk
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
# The tests in this file target the legacy FK paths.
statement ok
SET experimental_optimizer_foreign_keys = false
SET optimizer_foreign_keys = false

# Disable automatic stats to avoid flakiness.
statement ok
Expand Down
2 changes: 1 addition & 1 deletion pkg/sql/logictest/testdata/logic_test/fk_opt
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
# The tests in this file target the new optimizer-driven FK paths (with
# fall back on the legacy paths for unsupported cases).
statement ok
SET experimental_optimizer_foreign_keys = true
SET optimizer_foreign_keys = true

# Randomize the use of insert fast path.
# The let statement will also log the value.
Expand Down
6 changes: 3 additions & 3 deletions pkg/sql/logictest/testdata/logic_test/pg_catalog
Original file line number Diff line number Diff line change
Expand Up @@ -1583,7 +1583,6 @@ enable_insert_fast_path on NULL NULL
enable_zigzag_join on NULL NULL NULL string
experimental_enable_hash_sharded_indexes off NULL NULL NULL string
experimental_enable_temp_tables off NULL NULL NULL string
experimental_optimizer_foreign_keys on NULL NULL NULL string
experimental_serial_normalization rowid NULL NULL NULL string
extra_float_digits 0 NULL NULL NULL string
force_savepoint_restart off NULL NULL NULL string
Expand All @@ -1595,6 +1594,7 @@ lock_timeout 0 NULL NULL
max_identifier_length 128 NULL NULL NULL string
max_index_keys 32 NULL NULL NULL string
node_id 1 NULL NULL NULL string
optimizer_foreign_keys on NULL NULL NULL string
reorder_joins_limit 4 NULL NULL NULL string
require_explicit_primary_keys off NULL NULL NULL string
results_buffer_size 16384 NULL NULL NULL string
Expand Down Expand Up @@ -1642,7 +1642,6 @@ enable_insert_fast_path on NULL user NUL
enable_zigzag_join on NULL user NULL on on
experimental_enable_hash_sharded_indexes off NULL user NULL off off
experimental_enable_temp_tables off NULL user NULL off off
experimental_optimizer_foreign_keys on NULL user NULL on on
experimental_serial_normalization rowid NULL user NULL rowid rowid
extra_float_digits 0 NULL user NULL 0 2
force_savepoint_restart off NULL user NULL off off
Expand All @@ -1654,6 +1653,7 @@ lock_timeout 0 NULL user NUL
max_identifier_length 128 NULL user NULL 128 128
max_index_keys 32 NULL user NULL 32 32
node_id 1 NULL user NULL 1 1
optimizer_foreign_keys on NULL user NULL on on
reorder_joins_limit 4 NULL user NULL 4 4
require_explicit_primary_keys off NULL user NULL off off
results_buffer_size 16384 NULL user NULL 16384 16384
Expand Down Expand Up @@ -1697,7 +1697,6 @@ enable_insert_fast_path NULL NULL NULL NULL
enable_zigzag_join NULL NULL NULL NULL NULL
experimental_enable_hash_sharded_indexes NULL NULL NULL NULL NULL
experimental_enable_temp_tables NULL NULL NULL NULL NULL
experimental_optimizer_foreign_keys NULL NULL NULL NULL NULL
experimental_serial_normalization NULL NULL NULL NULL NULL
extra_float_digits NULL NULL NULL NULL NULL
force_savepoint_restart NULL NULL NULL NULL NULL
Expand All @@ -1710,6 +1709,7 @@ max_identifier_length NULL NULL NULL NULL
max_index_keys NULL NULL NULL NULL NULL
node_id NULL NULL NULL NULL NULL
optimizer NULL NULL NULL NULL NULL
optimizer_foreign_keys NULL NULL NULL NULL NULL
reorder_joins_limit NULL NULL NULL NULL NULL
require_explicit_primary_keys NULL NULL NULL NULL NULL
results_buffer_size NULL NULL NULL NULL NULL
Expand Down
2 changes: 1 addition & 1 deletion pkg/sql/logictest/testdata/logic_test/show_source
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,6 @@ enable_insert_fast_path on
enable_zigzag_join on
experimental_enable_hash_sharded_indexes off
experimental_enable_temp_tables off
experimental_optimizer_foreign_keys on
experimental_serial_normalization rowid
extra_float_digits 0
force_savepoint_restart off
Expand All @@ -54,6 +53,7 @@ lock_timeout 0
max_identifier_length 128
max_index_keys 32
node_id 1
optimizer_foreign_keys on
reorder_joins_limit 4
require_explicit_primary_keys off
results_buffer_size 16384
Expand Down
2 changes: 1 addition & 1 deletion pkg/sql/opt/bench/fk_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,7 @@ func runFKBench(
// Don't let auto stats interfere with the test. Stock stats are
// sufficient to get the right plans (i.e. lookup join).
r.Exec(b, "SET CLUSTER SETTING sql.stats.automatic_collection.enabled = false")
r.Exec(b, fmt.Sprintf("SET experimental_optimizer_foreign_keys = %v", cfg.optFKOn))
r.Exec(b, fmt.Sprintf("SET optimizer_foreign_keys = %v", cfg.optFKOn))
r.Exec(b, fmt.Sprintf("SET enable_insert_fast_path = %v", cfg.insertFastPath))
setup(b, r, cfg.setupFKs)
b.ResetTimer()
Expand Down
2 changes: 1 addition & 1 deletion pkg/sql/opt/exec/execbuilder/testdata/autocommit
Original file line number Diff line number Diff line change
Expand Up @@ -616,7 +616,7 @@ statement ok
CREATE TABLE fk_parent (p INT PRIMARY KEY, q INT, FAMILY f1 (p, q));
INSERT INTO fk_parent VALUES (1, 10), (2, 20), (3, 30);
CREATE TABLE fk_child (a INT, b INT REFERENCES fk_parent(p), FAMILY f1 (a, b));
SET experimental_optimizer_foreign_keys = true
SET optimizer_foreign_keys = true

# Populate table descriptor cache.
statement ok
Expand Down
6 changes: 3 additions & 3 deletions pkg/sql/opt/exec/execbuilder/testdata/explain_env
Original file line number Diff line number Diff line change
Expand Up @@ -87,12 +87,12 @@ EXPLAIN (OPT, ENV) SELECT * FROM y WHERE u = 3
https://cockroachdb.github.io/text/decode.html#eJxUj81O20AUhdfMUxyxwa5iAsqmSpTFYG7aaZ1xNJ5SIoQsx5nQKcFG4x_FrHgIP2GepEqTSmV57_3uOfrujKtsWYwRlvmzK7P81-0NzM7kq8Zu18ahNlWN9kgxFirimqD5TUTo4LGzBkLqz5CxhvwRRQN21p42xymMZaIVF1Lj_NXZl8x151goMedqie-0hNeAJ6H_Ed08p23qzCbdYRYrEl_kkW19KJqRIhlSgp2XHf6EvKV7dGmb2vUOXvsvb8bnIlr-V-s1A7Q-8yeM8UiTOnkcFC9fm9XW5pcdhPxGoUaiuRaJFmGCi4fHiwljCWk4U7q1cenv0hZVurUvtsYU11dXp7spstXWpG_26S17-kthinKzmTBG94uICwkvXugBSN75SCg6VH3CTMVzdPj5lRShwRSjCQuCIGBVnhXoGPZ9v-_f9_078rKoapfZoh5jeD3Gw3CEAMPRI_sTAAD___ayhqM=

statement ok
SET experimental_optimizer_foreign_keys = true
SET optimizer_foreign_keys = false

query T
EXPLAIN (OPT, ENV) SELECT * FROM y WHERE u = 3
----
https://cockroachdb.github.io/text/decode.html#eJxUj81O20AUhdfMUxyxwa5iAsqmSpTFYG7aaZ1xNJ5SIoQsx5nQKcFG4x_FrHgIP2GepEqTSmV57_3uOfrujKtsWYwRlvmzK7P81-0NzM7kq8Zu18ahNlWN9kgxFirimqD5TUTo4LGzBkLqz5CxhvwRRQN21p42xymMZaIVF1Lj_NXZl8x151goMedqie-0hNeAJ6H_Ed08p23qzCbdYRYrEl_kkW19KJqRIhlSgp2XHf6EvKV7dGmb2vUOXvsvb8bnIlr-V-s1A7Q-8yeM8UiTOnkcFC9fm9XW5pcdhPxGoUaiuRaJFmGCi4fHiwljCWk4U7q1cenv0hZVurUvtsYU11dXp7spstXWpG_26S17-kthinKzmTBG94uICwkvXugBSN75SCg6VH3CTMVzdPj5lRShwRSjCQuCIGBVnhXoGPZ9v-_f9_078rKoapfZoh5jeD3Gw3CEAMPRI_sTAAD___ayhqM=
https://cockroachdb.github.io/text/decode.html#eJxUj91u2jAYho_rq3jVkyZTU1pxMoE4SFOzeQsOcryuqKqsEBzmATFyfkQ46kVwhVzJxGDTdvh97_P9PM_aVcaWA0Q2Xzmb5T-eHqF3Op83Zr3QDrWuarRnipBI0FBSyPAxpujgkasGjMuP4IkE_xbHt-SqvXTOVZTwVIqQcYnrrTObzHXXmAo2CcUMX-kMXoMwjfz_0WKlWuV0oXYYJ4KyT_zMtj4EHVNBeURT7LzsNMf4E31Bp1plFjt47Z9943DC4tk_Z73mFq1P_CEhYSypuHicFO-2zXxt8rsOjH-hkUQqQ8lSyaIUN69vN0NCUirhtHUL7dRPa8pKrc3G1Bjh4f7-kusym6-12pvlPlv-pjCCLYpLbLe12Zi9dqqwTptlqVa6q_4i9GUah4zDS6byFpQ_-0hpfPrmA8YimaDD989UUDQYoT8kQRAEpMqzEh3B8XA4Ht6Ph3fktqxql5myHqD3MMBrr48Avf4b-RUAAP__GCuS_Q==

statement ok
RESET reorder_joins_limit
Expand All @@ -101,7 +101,7 @@ statement ok
RESET enable_zigzag_join

statement ok
RESET experimental_optimizer_foreign_keys
RESET optimizer_foreign_keys

#
# Test sequences.
Expand Down
2 changes: 1 addition & 1 deletion pkg/sql/opt/exec/execbuilder/testdata/fk_opt
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
# LogicTest: local

statement ok
SET experimental_optimizer_foreign_keys = true
SET optimizer_foreign_keys = true

# We will test the fast path later.
statement ok
Expand Down
2 changes: 1 addition & 1 deletion pkg/sql/opt_exec_factory.go
Original file line number Diff line number Diff line change
Expand Up @@ -1095,7 +1095,7 @@ FROM

for _, param := range []string{
"enable_zigzag_join",
"experimental_optimizer_foreign_keys",
"optimizer_foreign_keys",
} {
value, err := ef.environmentQuery(fmt.Sprintf("SHOW %s", param))
if err != nil {
Expand Down
4 changes: 2 additions & 2 deletions pkg/sql/vars.go
Original file line number Diff line number Diff line change
Expand Up @@ -447,8 +447,8 @@ var varGen = map[string]sessionVar{
},

// CockroachDB extension.
`experimental_optimizer_foreign_keys`: {
GetStringVal: makeBoolGetStringValFn(`experimental_optimizer_foreign_keys`),
`optimizer_foreign_keys`: {
GetStringVal: makeBoolGetStringValFn(`optimizer_foreign_keys`),
Set: func(_ context.Context, m *sessionDataMutator, s string) error {
b, err := parsePostgresBool(s)
if err != nil {
Expand Down
77 changes: 72 additions & 5 deletions pkg/storage/metamorphic/generator.go
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,7 @@ func makeStorageConfig(path string) base.StorageConfig {
}
}

func createTestRocksDBEngine(path string) (storage.Engine, error) {
func createTestRocksDBEngine(path string, seed int64) (storage.Engine, error) {
cache := storage.NewRocksDBCache(1 << 20)
defer cache.Release()
cfg := storage.RocksDBConfig{
Expand All @@ -47,7 +47,7 @@ func createTestRocksDBEngine(path string) (storage.Engine, error) {
return storage.NewRocksDB(cfg, cache)
}

func createTestPebbleEngine(path string) (storage.Engine, error) {
func createTestPebbleEngine(path string, seed int64) (storage.Engine, error) {
pebbleConfig := storage.PebbleConfig{
StorageConfig: makeStorageConfig(path),
Opts: storage.DefaultPebbleOptions(),
Expand All @@ -58,9 +58,74 @@ func createTestPebbleEngine(path string) (storage.Engine, error) {
return storage.NewPebble(context.Background(), pebbleConfig)
}

// createTestPebbleManySSTs returns a Pebble engine configured with very small
// target file sizes (256 bytes at L0, doubling at each subsequent level) so
// that flushes and compactions produce many sstables. The seed argument is
// unused; it exists so this function matches the engineImpl create signature.
func createTestPebbleManySSTs(path string, seed int64) (storage.Engine, error) {
	cfg := storage.PebbleConfig{
		StorageConfig: makeStorageConfig(path),
		Opts:          storage.DefaultPebbleOptions(),
	}
	for i, lvls := 0, cfg.Opts.Levels; i < len(lvls); i++ {
		if i == 0 {
			lvls[i].TargetFileSize = 1 << 8 // 256 bytes
			continue
		}
		lvls[i].TargetFileSize = lvls[i-1].TargetFileSize * 2
	}
	cfg.Opts.Cache = pebble.NewCache(1 << 20)
	// NewPebble takes its own reference on the cache; drop ours on return.
	defer cfg.Opts.Cache.Unref()

	return storage.NewPebble(context.Background(), cfg)
}

func rngIntRange(rng *rand.Rand, min int64, max int64) int64 {
return min + rng.Int63n(max)
}

// createTestPebbleVarOpts returns a Pebble engine whose tunable options
// (sync/compaction thresholds, per-level block and file sizes, memtable
// sizing, rate limits, cache size, ...) are drawn pseudo-randomly from the
// provided seed. The same seed always reproduces the same configuration.
func createTestPebbleVarOpts(path string, seed int64) (storage.Engine, error) {
	opts := storage.DefaultPebbleOptions()

	rng := rand.New(rand.NewSource(seed))
	opts.BytesPerSync = 1 << rngIntRange(rng, 8, 30)
	opts.LBaseMaxBytes = 1 << rngIntRange(rng, 8, 30)
	opts.L0CompactionThreshold = int(rngIntRange(rng, 1, 10))
	opts.L0StopWritesThreshold = int(rngIntRange(rng, 1, 32))
	// Stopping writes below the compaction threshold would be nonsensical;
	// clamp the stop threshold up to at least the compaction threshold.
	if opts.L0StopWritesThreshold < opts.L0CompactionThreshold {
		opts.L0StopWritesThreshold = opts.L0CompactionThreshold
	}
	// Randomize L0's block/file parameters; every higher level copies the
	// one below it, with the target file size doubled per level.
	for lvl := range opts.Levels {
		if lvl > 0 {
			opts.Levels[lvl] = opts.Levels[lvl-1]
			opts.Levels[lvl].TargetFileSize = opts.Levels[lvl-1].TargetFileSize * 2
			continue
		}
		l0 := &opts.Levels[lvl]
		l0.BlockRestartInterval = int(rngIntRange(rng, 1, 64))
		l0.BlockSize = 1 << rngIntRange(rng, 1, 20)
		l0.BlockSizeThreshold = int(rngIntRange(rng, 50, 100))
		l0.IndexBlockSize = l0.BlockSize
		l0.TargetFileSize = 1 << rngIntRange(rng, 1, 20)
	}
	opts.MaxManifestFileSize = 1 << rngIntRange(rng, 1, 28)
	opts.MaxOpenFiles = int(rngIntRange(rng, 20, 2000))
	opts.MemTableSize = 1 << rngIntRange(rng, 10, 28)
	opts.MemTableStopWritesThreshold = int(rngIntRange(rng, 2, 7))
	opts.MinCompactionRate = int(rngIntRange(rng, 1<<8, 8<<20))
	opts.MinFlushRate = int(rngIntRange(rng, 1<<8, 4<<20))
	opts.MaxConcurrentCompactions = int(rngIntRange(rng, 1, 4))

	opts.Cache = pebble.NewCache(1 << rngIntRange(rng, 1, 30))
	// NewPebble takes its own reference on the cache; drop ours on return.
	defer opts.Cache.Unref()

	return storage.NewPebble(context.Background(), storage.PebbleConfig{
		StorageConfig: makeStorageConfig(path),
		Opts:          opts,
	})
}

type engineImpl struct {
name string
create func(path string) (storage.Engine, error)
create func(path string, seed int64) (storage.Engine, error)
}

var _ fmt.Stringer = &engineImpl{}
Expand All @@ -71,6 +136,8 @@ func (e *engineImpl) String() string {

var engineImplRocksDB = engineImpl{"rocksdb", createTestRocksDBEngine}
var engineImplPebble = engineImpl{"pebble", createTestPebbleEngine}
var engineImplPebbleManySSTs = engineImpl{"pebble_many_ssts", createTestPebbleManySSTs}
var engineImplPebbleVarOpts = engineImpl{"pebble_var_opts", createTestPebbleVarOpts}

// Object to store info corresponding to one metamorphic test run. Responsible
// for generating and executing operations.
Expand Down Expand Up @@ -110,7 +177,7 @@ func (m *metaTestRunner) init() {
m.curEngine = 0

var err error
m.engine, err = m.engineImpls[0].create(m.path)
m.engine, err = m.engineImpls[0].create(m.path, m.seed)
if err != nil {
m.t.Fatal(err)
}
Expand Down Expand Up @@ -281,7 +348,7 @@ func (m *metaTestRunner) restart() (string, string) {
}

var err error
m.engine, err = m.engineImpls[m.curEngine].create(m.path)
m.engine, err = m.engineImpls[m.curEngine].create(m.path, m.seed)
if err != nil {
m.t.Fatal(err)
}
Expand Down
3 changes: 3 additions & 0 deletions pkg/storage/metamorphic/meta_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -174,6 +174,8 @@ func TestRocksPebbleEquivalence(t *testing.T) {
engineSequences: [][]engineImpl{
{engineImplRocksDB},
{engineImplPebble},
{engineImplPebbleManySSTs},
{engineImplPebbleVarOpts},
},
}
runMetaTest(run)
Expand Down Expand Up @@ -206,6 +208,7 @@ func TestRocksPebbleRestarts(t *testing.T) {
{engineImplRocksDB},
{engineImplPebble},
{engineImplRocksDB, engineImplPebble},
{engineImplRocksDB, engineImplPebbleManySSTs, engineImplPebbleVarOpts},
},
}
runMetaTest(run)
Expand Down
7 changes: 1 addition & 6 deletions pkg/storage/metamorphic/operands.go
Original file line number Diff line number Diff line change
Expand Up @@ -167,12 +167,7 @@ func (v *valueGenerator) toString(value []byte) string {
}

func (v *valueGenerator) parse(input string) []byte {
var value = make([]byte, 0, maxValueSize)
_, err := fmt.Sscanf(input, "%s", &value)
if err != nil {
panic(err)
}
return value
return []byte(input)
}

type txnID string
Expand Down
5 changes: 4 additions & 1 deletion pkg/storage/metamorphic/operations.go
Original file line number Diff line number Diff line change
Expand Up @@ -437,7 +437,10 @@ func (b batchCommitOp) run(ctx context.Context) string {
if b.id == "engine" {
return "noop"
}
batch := b.m.getReadWriter(b.id)
batch := b.m.getReadWriter(b.id).(storage.Batch)
if err := batch.Commit(true); err != nil {
return err.Error()
}
batch.Close()
delete(b.m.openBatches, b.id)
return "ok"
Expand Down

0 comments on commit c4426dd

Please sign in to comment.