diff --git a/pkg/ccl/logictestccl/testdata/logic_test/multi_region_remote_access_error b/pkg/ccl/logictestccl/testdata/logic_test/multi_region_remote_access_error index 721259313630..0bfbc0020623 100644 --- a/pkg/ccl/logictestccl/testdata/logic_test/multi_region_remote_access_error +++ b/pkg/ccl/logictestccl/testdata/logic_test/multi_region_remote_access_error @@ -1,5 +1,5 @@ # tenant-cluster-setting-override-opt: allow-multi-region-abstractions-for-secondary-tenants -# LogicTest: multiregion-9node-3region-3azs !metamorphic +# LogicTest: multiregion-9node-3region-3azs !metamorphic-batch-sizes # Set the closed timestamp interval to be short to shorten the amount of time # we need to wait for the system config to propagate. diff --git a/pkg/ccl/logictestccl/testdata/logic_test/partitioning_hash_sharded_index_query_plan b/pkg/ccl/logictestccl/testdata/logic_test/partitioning_hash_sharded_index_query_plan index 44438b606e5b..134fbbe9b49f 100644 --- a/pkg/ccl/logictestccl/testdata/logic_test/partitioning_hash_sharded_index_query_plan +++ b/pkg/ccl/logictestccl/testdata/logic_test/partitioning_hash_sharded_index_query_plan @@ -1,4 +1,4 @@ -# LogicTest: 5node !metamorphic +# LogicTest: 5node !metamorphic-batch-sizes statement ok SET experimental_enable_implicit_column_partitioning = true; diff --git a/pkg/ccl/logictestccl/testdata/logic_test/regional_by_row_hash_sharded_index_query_plan b/pkg/ccl/logictestccl/testdata/logic_test/regional_by_row_hash_sharded_index_query_plan index 1a5b1991f86e..b21a127da9f1 100644 --- a/pkg/ccl/logictestccl/testdata/logic_test/regional_by_row_hash_sharded_index_query_plan +++ b/pkg/ccl/logictestccl/testdata/logic_test/regional_by_row_hash_sharded_index_query_plan @@ -1,5 +1,5 @@ # tenant-cluster-setting-override-opt: allow-multi-region-abstractions-for-secondary-tenants -# LogicTest: multiregion-9node-3region-3azs !metamorphic +# LogicTest: multiregion-9node-3region-3azs !metamorphic-batch-sizes # TODO(#75864): enable 
multiregion-9node-3region-3azs-tenant statement ok diff --git a/pkg/ccl/logictestccl/testdata/logic_test/regional_by_row_query_behavior b/pkg/ccl/logictestccl/testdata/logic_test/regional_by_row_query_behavior index 8c432a9d98a6..70f3243a1ae9 100644 --- a/pkg/ccl/logictestccl/testdata/logic_test/regional_by_row_query_behavior +++ b/pkg/ccl/logictestccl/testdata/logic_test/regional_by_row_query_behavior @@ -1,5 +1,5 @@ # tenant-cluster-setting-override-opt: allow-multi-region-abstractions-for-secondary-tenants -# LogicTest: multiregion-9node-3region-3azs !metamorphic +# LogicTest: multiregion-9node-3region-3azs !metamorphic-batch-sizes # TODO(#75864): enable multiregion-9node-3region-3azs-tenant and/or revert # the commit that split these changes out. @@ -290,7 +290,7 @@ pk pk2 a b j # Test that a limited, ordered scan is efficient. -query T +query T retry SELECT * FROM [EXPLAIN (VERBOSE) SELECT * FROM regional_by_row_table ORDER BY pk LIMIT 5] OFFSET 2 ---- @@ -335,7 +335,7 @@ ORDER BY pk LIMIT 5] OFFSET 2 # Test that the synthesized UNIQUE WITHOUT INDEX constraints do not cause # lookups into redundant arbiters. 
-query T +query T retry SELECT * FROM [ EXPLAIN INSERT INTO regional_by_row_table (crdb_region, pk, pk2, a, b) VALUES ('ca-central-1', 7, 7, 8, 9) ON CONFLICT DO NOTHING @@ -455,7 +455,7 @@ SET locality_optimized_partitioned_index_scan = true statement ok SET vectorize=on -query T +query T retry SELECT * FROM [EXPLAIN (DISTSQL) SELECT * FROM regional_by_row_table WHERE pk = 1] OFFSET 2 ---- · @@ -490,7 +490,7 @@ EXPLAIN (VEC) SELECT * FROM regional_by_row_table WHERE pk = 1 statement ok SET vectorize=off -query T +query T retry SELECT * FROM [EXPLAIN (DISTSQL) SELECT * FROM regional_by_row_table WHERE pk = 1] AS temp(a) WHERE a LIKE '%Diagram%' ---- Diagram: https://cockroachdb.github.io/distsqlplan/decode.html#eJy8keFr1EAQxb_7VwwDpa2sJBu_SEBoaSMG47VeDhR64di7Hc71kt11d4MtR_53uUQ4IxdBBT_Oe_Mm75fdo_9aY4plVmQ3C3gOb-Z37-Eh-3RfXOczuLjNy0X5obiE8YKjrTJa1Kv108qZb6sg1jXBx7fZPAO7g9fAK7guIVBj4UJc_nAEFPm7DM7PbpXYOtGcnSNDbSTNREMe0wfkWDG0zmzIe-MO0r5fyOUjpjFDpW0bDnLFcGMcYbrHoEJNmOLi0GFOQpKLYmQoKQhV92dP1r06qa7sjp6Q4Y2p20b7FOyOgd0lDASDNYMvyLC04uBES1wuH1_FS4x4FIPQEjiY8JkcVh1D04ZjWR_EljDlHfs7IP6fgK4GmEmAZBLg2NuTU6KGVhsnyZEcVa-6E6Qz88LYKBkzFqpRAfhklfhP_uWcvDXa0y9dpi5XDEluaeDypnUbundm039mGO_6XC9I8mFwk2HIdW_1j_1zmP9LOPlt-OUoHHdV9-x7AAAA__-eCkLr @@ -556,7 +556,7 @@ RESET vectorize # The local region for this query is ca-central-1, so that span should be # scanned in the first child of the limited union all. -query T nodeidx=3 +query T nodeidx=3 retry USE multi_region_test_db; SET locality_optimized_partitioned_index_scan = true; SELECT * FROM [EXPLAIN SELECT * FROM regional_by_row_table WHERE pk = 1] OFFSET 2 ---- @@ -576,7 +576,7 @@ SELECT * FROM [EXPLAIN SELECT * FROM regional_by_row_table WHERE pk = 1] OFFSET # Query with more than one key. 
-query T +query T retry SELECT * FROM [EXPLAIN SELECT * FROM regional_by_row_table WHERE pk IN (1, 4)] OFFSET 2 ---- · @@ -777,7 +777,7 @@ statement ok SET locality_optimized_partitioned_index_scan = true # Anti join with locality optimized search enabled. -query T +query T retry SELECT * FROM [EXPLAIN (DISTSQL) SELECT * FROM child WHERE NOT EXISTS (SELECT * FROM parent WHERE p_id = c_p_id) AND c_id = 10] OFFSET 2 ---- · @@ -858,7 +858,7 @@ Scan /Table/111/1/"\x80"/20/0, /Table/111/1/"\xc0"/20/0 fetched: /parent/parent_pkey/'ca-central-1'/20 -> # Semi join with locality optimized search enabled. -query T +query T retry SELECT * FROM [EXPLAIN (DISTSQL) SELECT * FROM child WHERE EXISTS (SELECT * FROM parent WHERE p_id = c_p_id) AND c_id = 10] OFFSET 2 ---- · @@ -936,7 +936,7 @@ fetched: /parent/parent_pkey/'ca-central-1'/20 -> output row: [20 20] # Inner join with locality optimized search enabled. -query T +query T retry SELECT * FROM [EXPLAIN (DISTSQL) SELECT * FROM child INNER JOIN parent ON p_id = c_p_id WHERE c_id = 10] OFFSET 2 ---- · @@ -1014,7 +1014,7 @@ fetched: /parent/parent_pkey/'ca-central-1'/20 -> output row: [20 20 20] # Left join with locality optimized search enabled. -query T +query T retry SELECT * FROM [EXPLAIN (DISTSQL) SELECT * FROM child LEFT JOIN parent ON p_id = c_p_id WHERE c_id = 10] OFFSET 2 ---- · @@ -1091,7 +1091,7 @@ Scan /Table/111/1/"\x80"/20/0, /Table/111/1/"\xc0"/20/0 fetched: /parent/parent_pkey/'ca-central-1'/20 -> output row: [20 20 20] -query T +query T retry SELECT * FROM [EXPLAIN INSERT INTO child VALUES (1, 1)] OFFSET 2 ---- · @@ -1141,7 +1141,7 @@ SELECT * FROM [EXPLAIN INSERT INTO child VALUES (1, 1)] OFFSET 2 # Non-constant insert values cannot be inlined in uniqueness check, and all # regions must be searched for duplicates. 
-query T +query T retry SELECT * FROM [EXPLAIN INSERT INTO child VALUES (1, 1), (2, 2)] OFFSET 2 ---- · @@ -1189,7 +1189,7 @@ SELECT * FROM [EXPLAIN INSERT INTO child VALUES (1, 1), (2, 2)] OFFSET 2 estimated row count: 2 label: buffer 1 -query T +query T retry SELECT * FROM [EXPLAIN UPSERT INTO child VALUES (1, 1)] OFFSET 2 ---- · @@ -1311,7 +1311,7 @@ ALTER TABLE t56201 INJECT STATISTICS '[ statement ok ALTER TABLE t56201 ADD CONSTRAINT key_a_b UNIQUE (a, b); -query T +query T retry SELECT * FROM [EXPLAIN (VERBOSE) SELECT a, b FROM t56201 WHERE a IS NOT NULL AND b IS NOT NULL @@ -1391,7 +1391,7 @@ LIMIT 1] OFFSET 2 statement ok CREATE UNIQUE INDEX key_b_partial ON t56201 (b) WHERE a > 0; -query T +query T retry SELECT * FROM [EXPLAIN (VERBOSE) SELECT b FROM t56201@key_b_partial WHERE b IS NOT NULL AND a > 0 @@ -1457,7 +1457,7 @@ LIMIT 1] OFFSET 2 statement ok CREATE UNIQUE INDEX key_c_partial ON t56201 (c) WHERE a = 1; -query T +query T retry SELECT * FROM [EXPLAIN (VERBOSE) SELECT c FROM t56201 WHERE c IS NOT NULL AND a = 1 @@ -1521,7 +1521,7 @@ ALTER TABLE regional_by_row_table ADD CONSTRAINT unique_b_a UNIQUE(b, a) # We should plan uniqueness checks for all unique indexes in # REGIONAL BY ROW tables. -query T +query T retry SELECT * FROM [EXPLAIN INSERT INTO regional_by_row_table (pk, pk2, a, b) VALUES (1, 1, 1, 1)] OFFSET 2 ---- · @@ -1639,7 +1639,7 @@ INSERT INTO regional_by_row_table (crdb_region, pk, pk2, a, b) VALUES ('us-east- # TODO(treilly): The constraint check for uniq_idx should use uniq_idx but due # to stats issues w/ empty stats, partial indexes and multicol stats its not. # Hopefully fixing #67583 (and possibly #67479) will resolve this. 
-query T +query T retry SELECT * FROM [EXPLAIN UPSERT INTO regional_by_row_table (crdb_region, pk, pk2, a, b) VALUES ('us-east-1', 2, 3, 2, 3)] OFFSET 2 ---- · @@ -1714,7 +1714,7 @@ SELECT * FROM [EXPLAIN UPSERT INTO regional_by_row_table (crdb_region, pk, pk2, # TODO(treilly): The constraint check for uniq_idx should use uniq_idx but due # to stats issues w/ empty stats, partial indexes and multicol stats its not. # Hopefully fixing #67583 (and possibly #67479) will resolve this. -query T +query T retry SELECT * FROM [EXPLAIN UPSERT INTO regional_by_row_table (crdb_region, pk, pk2, a, b) VALUES ('us-east-1', 23, 24, 25, 26), ('ca-central-1', 30, 30, 31, 32)] OFFSET 2 ---- @@ -1888,7 +1888,7 @@ pk a b crdb_region_col # We do not need uniqueness checks on pk since uniqueness can be inferred # through the functional dependency between pk and the computed region column. -query T +query T retry SELECT * FROM [EXPLAIN INSERT INTO regional_by_row_table_as (pk, a, b) VALUES (1, 1, 1)] OFFSET 2 ---- · @@ -1961,7 +1961,7 @@ CREATE TABLE regional_by_row_table_virt ( ) LOCALITY REGIONAL BY ROW # Uniqueness checks for virtual columns should be efficient. -query T +query T retry SELECT * FROM [EXPLAIN INSERT INTO regional_by_row_table_virt (pk, a, b) VALUES (1, 1, 1)] OFFSET 2 ---- · @@ -2034,7 +2034,7 @@ SELECT * FROM [EXPLAIN INSERT INTO regional_by_row_table_virt (pk, a, b) VALUES table: regional_by_row_table_virt@regional_by_row_table_virt_expr_key spans: [/'ap-southeast-2'/11 - /'ap-southeast-2'/11] [/'ca-central-1'/11 - /'ca-central-1'/11] [/'us-east-1'/11 - /'us-east-1'/11] -query T +query T retry SELECT * FROM [EXPLAIN UPSERT INTO regional_by_row_table_virt (pk, a, b) VALUES (1, 1, 1)] OFFSET 2 ---- · @@ -2123,7 +2123,7 @@ CREATE TABLE regional_by_row_table_virt_partial ( ) LOCALITY REGIONAL BY ROW # Uniqueness checks for virtual columns should be efficient. 
-query T +query T retry SELECT * FROM [EXPLAIN INSERT INTO regional_by_row_table_virt_partial (pk, a, b) VALUES (1, 1, 1)] OFFSET 2 ---- · @@ -2212,7 +2212,7 @@ SELECT * FROM [EXPLAIN INSERT INTO regional_by_row_table_virt_partial (pk, a, b) table: regional_by_row_table_virt_partial@regional_by_row_table_virt_partial_pkey spans: [/'ap-southeast-2' - /'ap-southeast-2'] [/'ca-central-1' - /'us-east-1'] -query T +query T retry SELECT * FROM [EXPLAIN UPSERT INTO regional_by_row_table_virt_partial (pk, a, b) VALUES (1, 1, 1)] OFFSET 2 ---- · @@ -2399,7 +2399,7 @@ statement ok SET database = multi_region_test_db # LIMIT clause enables locality optimized scan on a REGIONAL BY ROW table -query T +query T retry SELECT * FROM [ EXPLAIN SELECT pk, pk2, a, b, crdb_region @@ -2460,7 +2460,37 @@ FROM statement ok SET vectorize = "on" -statement ok nodeidx=0 +query T retry +EXPLAIN(OPT) SELECT + count(*) +FROM + ( + SELECT + * + FROM + regional_by_row_table_as4@a_idx + WHERE + a BETWEEN 1 AND 100 + LIMIT + 10 + ) +---- +scalar-group-by + ├── locality-optimized-search + │ ├── scan regional_by_row_table_as4@a_idx + │ │ ├── constraint: /10/9/8: [/'ap-southeast-2'/1 - /'ap-southeast-2'/100] + │ │ ├── limit: 10 + │ │ └── flags: force-index=a_idx + │ └── scan regional_by_row_table_as4@a_idx + │ ├── constraint: /15/14/13 + │ │ ├── [/'ca-central-1'/1 - /'ca-central-1'/100] + │ │ └── [/'us-east-1'/1 - /'us-east-1'/100] + │ ├── limit: 10 + │ └── flags: force-index=a_idx + └── aggregations + └── count-rows + +statement ok SET database = multi_region_test_db; SET TRACING = "on", kv, results; SELECT @@ -2552,7 +2582,7 @@ statement ok SET vectorize = "on" # Locality optimized scan with an IN list -query T +query T retry SELECT * FROM @@ -2705,7 +2735,7 @@ statement ok SET vectorize = "on" # Locality optimized scan with multiple range predicates -query T +query T retry SELECT * FROM @@ -2872,7 +2902,7 @@ statement ok INSERT INTO regional_by_row_table_as1 (pk) VALUES (1), (2), (3), (10), (20) # 
An extra crdb_region check constraint should still allow locality optimized scan. -query T +query T retry SELECT * FROM [EXPLAIN SELECT * FROM regional_by_row_table_as1 LIMIT 3] OFFSET 2 ---- · @@ -2912,7 +2942,7 @@ CREATE TABLE users ( ) LOCALITY REGIONAL BY ROW # Check that we don't recommend indexes that already exist. -query T +query T retry EXPLAIN INSERT INTO users (name, email) VALUES ('Craig Roacher', 'craig@cockroachlabs.com') ---- @@ -2981,7 +3011,7 @@ CREATE TABLE user_settings_cascades ( # users.id = user_settings.user_id AND users.crdb_region = user_settings.crdb_region # This would allow the optimizer to plan a lookup join between users and user_settings # and avoid visiting all regions. See #69617. -query T +query T retry EXPLAIN SELECT users.crdb_region AS user_region, user_settings.crdb_region AS user_settings_region, * FROM users JOIN user_settings ON users.id = user_settings.user_id AND users.id = '5ebfedee-0dcf-41e6-a315-5fa0b51b9882'; ---- @@ -3038,7 +3068,7 @@ vectorized: true FK check: users@users_pkey size: 5 columns, 1 row -query T +query T retry EXPLAIN DELETE FROM users WHERE id = '5ebfedee-0dcf-41e6-a315-5fa0b51b9882' ---- distribution: local diff --git a/pkg/col/coldata/batch.go b/pkg/col/coldata/batch.go index fd8581f8d391..eea39f43f585 100644 --- a/pkg/col/coldata/batch.go +++ b/pkg/col/coldata/batch.go @@ -82,6 +82,9 @@ type Batch interface { var _ Batch = &MemBatch{} +// DefaultColdataBatchSize is the default value of coldata-batch-size. +const DefaultColdataBatchSize = 1024 + // defaultBatchSize is the size of batches that is used in the non-test setting. // Initially, 1024 was picked based on MonetDB/X100 paper and was later // confirmed to be very good using tpchvec/bench benchmark on TPC-H queries @@ -89,7 +92,7 @@ var _ Batch = &MemBatch{} // better, so we decided to keep 1024 as it is a power of 2). 
var defaultBatchSize = int64(util.ConstantWithMetamorphicTestRange( "coldata-batch-size", - 1024, /* defaultValue */ + DefaultColdataBatchSize, /* defaultValue */ // min is set to 3 to match colexec's minBatchSize setting. 3, /* min */ MaxBatchSize, diff --git a/pkg/config/system.go b/pkg/config/system.go index a126e0543f1c..20afd34a6a52 100644 --- a/pkg/config/system.go +++ b/pkg/config/system.go @@ -402,6 +402,22 @@ func (s *SystemConfig) GetZoneConfigForObject( return entry.combined, nil } +// PurgeZoneConfigCache allocates a new zone config cache in this system config +// so that tables with stale zone config information could have this info +// looked up using the most up-to-date zone config the next time it's +// requested. Note, this function is only intended to be called during test +// execution, such as logic tests. +func (s *SystemConfig) PurgeZoneConfigCache() { + s.mu.Lock() + if len(s.mu.zoneCache) != 0 { + s.mu.zoneCache = map[ObjectID]zoneEntry{} + } + if len(s.mu.shouldSplitCache) != 0 { + s.mu.shouldSplitCache = map[ObjectID]bool{} + } + s.mu.Unlock() +} + // getZoneEntry returns the zone entry for the given system-tenant // object ID. In the fast path, the zone is already in the cache, and is // directly returned.
Otherwise, getZoneEntry will hydrate new diff --git a/pkg/sql/logictest/BUILD.bazel b/pkg/sql/logictest/BUILD.bazel index ce0ce031d09b..ac1d0147f1ff 100644 --- a/pkg/sql/logictest/BUILD.bazel +++ b/pkg/sql/logictest/BUILD.bazel @@ -29,6 +29,7 @@ go_library( "//pkg/base", "//pkg/cloud/externalconn/providers", "//pkg/clusterversion", + "//pkg/col/coldata", "//pkg/kv/kvclient/rangefeed", "//pkg/kv/kvserver", "//pkg/kv/kvserver/kvserverbase", diff --git a/pkg/sql/logictest/logic.go b/pkg/sql/logictest/logic.go index 41a81b511011..8b4adf4a6395 100644 --- a/pkg/sql/logictest/logic.go +++ b/pkg/sql/logictest/logic.go @@ -40,6 +40,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" _ "github.com/cockroachdb/cockroach/pkg/cloud/externalconn/providers" // imported to register ExternalConnection providers "github.com/cockroachdb/cockroach/pkg/clusterversion" + "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/kv/kvclient/rangefeed" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverbase" @@ -150,9 +151,9 @@ import ( // A link to the issue will be printed out if the -print-blocklist-issues flag // is specified. // -// There is a special directive '!metamorphic' that adjusts the server to force -// the usage of production values for some constants that might change via the -// metamorphic testing. +// There is a special directive '!metamorphic-batch-sizes' that adjusts the +// server to force the usage of production values related for some constants, +// mostly related to batch sizes, that might change via metamorphic testing. 
// // // ########################################################### @@ -2242,6 +2243,14 @@ func fetchSubtests(path string) ([]subtestDetails, error) { return subtests, nil } +func (t *logicTest) purgeZoneConfig() { + for i := 0; i < t.cluster.NumServers(); i++ { + sysconfigProvider := t.cluster.Server(i).SystemConfigProvider() + sysconfig := sysconfigProvider.GetSystemConfig() + sysconfig.PurgeZoneConfigCache() + } +} + func (t *logicTest) processSubtest( subtest subtestDetails, path string, config logictestbase.TestClusterConfig, rng *rand.Rand, ) error { @@ -2746,12 +2755,14 @@ func (t *logicTest) processSubtest( for i := 0; i < repeat; i++ { if query.retry && !*rewriteResultsInTestfiles { if err := testutils.SucceedsSoonError(func() error { + t.purgeZoneConfig() return t.execQuery(query) }); err != nil { t.Error(err) } } else { if query.retry && *rewriteResultsInTestfiles { + t.purgeZoneConfig() // The presence of the retry flag indicates that we expect this // query may need some time to succeed. If we are rewriting, wait // 500ms before executing the query. @@ -3859,7 +3870,8 @@ func RunLogicTest( } // Check whether the test can only be run in non-metamorphic mode. 
- _, onlyNonMetamorphic := logictestbase.ReadTestFileConfigs(t, path, logictestbase.ConfigSet{configIdx}) + _, nonMetamorphicBatchSizes := + logictestbase.ReadTestFileConfigs(t, path, logictestbase.ConfigSet{configIdx}) config := logictestbase.LogicTestConfigs[configIdx] // The tests below are likely to run concurrently; `log` is shared @@ -3911,7 +3923,12 @@ } // Each test needs a copy because of Parallel serverArgsCopy := serverArgs - serverArgsCopy.ForceProductionValues = serverArgs.ForceProductionValues || onlyNonMetamorphic + serverArgsCopy.ForceProductionValues = serverArgs.ForceProductionValues || nonMetamorphicBatchSizes + if serverArgsCopy.ForceProductionValues { + if err := coldata.SetBatchSizeForTests(coldata.DefaultColdataBatchSize); err != nil { + panic(errors.Wrap(err, "could not set batch size for test")) + } + } lt.setup( config, serverArgsCopy, readClusterOptions(t, path), readKnobOptions(t, path), readTenantClusterSettingOverrideArgs(t, path), ) diff --git a/pkg/sql/logictest/logictestbase/BUILD.bazel b/pkg/sql/logictest/logictestbase/BUILD.bazel index 8125bcc75765..1156124d8ef4 100644 --- a/pkg/sql/logictest/logictestbase/BUILD.bazel +++ b/pkg/sql/logictest/logictestbase/BUILD.bazel @@ -10,7 +10,6 @@ go_library( deps = [ "//pkg/build", "//pkg/roachpb", - "//pkg/util", ], ) diff --git a/pkg/sql/logictest/logictestbase/logictestbase.go b/pkg/sql/logictest/logictestbase/logictestbase.go index a4fb3743cf28..8ee7e5cdad80 100644 --- a/pkg/sql/logictest/logictestbase/logictestbase.go +++ b/pkg/sql/logictest/logictestbase/logictestbase.go @@ -23,7 +23,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/build" "github.com/cockroachdb/cockroach/pkg/roachpb" - "github.com/cockroachdb/cockroach/pkg/util" ) var ( @@ -588,7 +587,7 @@ func (l stdlogger) Logf(format string, args ...interface{}) { // If the file doesn't contain a directive, the default config is returned.
func ReadTestFileConfigs( t logger, path string, defaults ConfigSet, -) (_ ConfigSet, onlyNonMetamorphic bool) { +) (_ ConfigSet, nonMetamorphicBatchSizes bool) { file, err := os.Open(path) if err != nil { return nil, false @@ -612,8 +611,8 @@ func ReadTestFileConfigs( if len(fields) == 2 { t.Fatalf("%s: empty LogicTest directive", path) } - cs, onlyNonMetamorphic := processConfigs(t, path, defaults, fields[2:]) - return cs, onlyNonMetamorphic + cs, nonMetamorphicBatchSizes := processConfigs(t, path, defaults, fields[2:]) + return cs, nonMetamorphicBatchSizes } } // No directive found, return the default config. @@ -639,10 +638,11 @@ func getBlocklistIssueNo(blocklistDirective string) (string, int) { // processConfigs, given a list of configNames, returns the list of // corresponding logicTestConfigIdxs as well as a boolean indicating whether -// the test works only in non-metamorphic setting. +// metamorphic settings related to batch sizes should be overridden with default +// production values. func processConfigs( t logger, path string, defaults ConfigSet, configNames []string, -) (_ ConfigSet, onlyNonMetamorphic bool) { +) (_ ConfigSet, nonMetamorphicBatchSizes bool) { const blocklistChar = '!' // blocklist is a map from a blocked config to a corresponding issue number. // If 0, there is no associated issue. @@ -670,12 +670,12 @@ func processConfigs( } } - if _, ok := blocklist["metamorphic"]; ok && util.IsMetamorphicBuild() { - onlyNonMetamorphic = true + if _, ok := blocklist["metamorphic-batch-sizes"]; ok { + nonMetamorphicBatchSizes = true } if len(blocklist) != 0 && allConfigNamesAreBlocklistDirectives { // No configs specified, this blocklist applies to the default configs. 
- return applyBlocklistToConfigs(defaults, blocklist), onlyNonMetamorphic + return applyBlocklistToConfigs(defaults, blocklist), nonMetamorphicBatchSizes } var configs ConfigSet @@ -704,7 +704,7 @@ func processConfigs( } } - return configs, onlyNonMetamorphic + return configs, nonMetamorphicBatchSizes } // applyBlocklistToConfigs applies the given blocklist to configs, returning the diff --git a/pkg/sql/logictest/testdata/logic_test/create_as_non_metamorphic b/pkg/sql/logictest/testdata/logic_test/create_as_non_metamorphic index 3c3f68241876..a2d1a06ef397 100644 --- a/pkg/sql/logictest/testdata/logic_test/create_as_non_metamorphic +++ b/pkg/sql/logictest/testdata/logic_test/create_as_non_metamorphic @@ -1,4 +1,4 @@ -# LogicTest: !metamorphic +# LogicTest: !metamorphic-batch-sizes # Disabled to allow us to validate create as with large batch sizes. # Regression test for #81554, where tried to do gigantic batches for CTAS in diff --git a/pkg/sql/logictest/testdata/logic_test/distsql_automatic_stats b/pkg/sql/logictest/testdata/logic_test/distsql_automatic_stats index ed1d85ac7c4e..9e3e305054ef 100644 --- a/pkg/sql/logictest/testdata/logic_test/distsql_automatic_stats +++ b/pkg/sql/logictest/testdata/logic_test/distsql_automatic_stats @@ -1,4 +1,4 @@ -# LogicTest: !metamorphic +# LogicTest: !metamorphic-batch-sizes # Disable automatic stats statement ok diff --git a/pkg/sql/logictest/testdata/logic_test/lookup_join_local b/pkg/sql/logictest/testdata/logic_test/lookup_join_local index 6c045a5e9d5f..ef3eebcfe5ee 100644 --- a/pkg/sql/logictest/testdata/logic_test/lookup_join_local +++ b/pkg/sql/logictest/testdata/logic_test/lookup_join_local @@ -1,4 +1,4 @@ -# LogicTest: local !metamorphic +# LogicTest: local !metamorphic-batch-sizes # This test verifies that the row container used by the join reader spills to # disk in order to make room for the non-spillable internal state of the diff --git a/pkg/sql/opt/exec/execbuilder/testdata/explain_env 
b/pkg/sql/opt/exec/execbuilder/testdata/explain_env index a775cd5688e2..af850d6edfd4 100644 --- a/pkg/sql/opt/exec/execbuilder/testdata/explain_env +++ b/pkg/sql/opt/exec/execbuilder/testdata/explain_env @@ -1,4 +1,4 @@ -# LogicTest: local !metamorphic +# LogicTest: local !metamorphic-batch-sizes # We must turn off metamorphic variables because some are included in # EXPLAIN (OPT, ENV) output. diff --git a/pkg/testutils/serverutils/test_cluster_shim.go b/pkg/testutils/serverutils/test_cluster_shim.go index 03cbe68a0790..80821bc0c5fd 100644 --- a/pkg/testutils/serverutils/test_cluster_shim.go +++ b/pkg/testutils/serverutils/test_cluster_shim.go @@ -261,6 +261,11 @@ func StartNewTestCluster( ) TestClusterInterface { cluster := NewTestCluster(t, numNodes, args) cluster.Start(t) + for i := 0; i < cluster.NumServers(); i++ { + sysconfigProvider := cluster.Server(i).SystemConfigProvider() + sysconfig := sysconfigProvider.GetSystemConfig() + sysconfig.PurgeZoneConfigCache() + } return cluster }