diff --git a/build/teamcity-bazel-support.sh b/build/teamcity-bazel-support.sh index 538374cf0e1a..5bac3954ce8d 100644 --- a/build/teamcity-bazel-support.sh +++ b/build/teamcity-bazel-support.sh @@ -48,7 +48,7 @@ _tc_release_branch() { # `$BAZEL_BIN/pkg/cmd/github-post/github-post_/github-post` # artifacts_dir: usually `/artifacts` # test_json: path to test's JSON output, usually generated by `rules_go`'s and -# `GO_TEST_JSON_OUTPUT_FILE`. The file is removed after processing. +# `GO_TEST_JSON_OUTPUT_FILE`. # create_tarball: whether to create a tarball with full logs. If the test's # exit code is passed, the tarball is generated on failures. process_test_json() { @@ -58,12 +58,6 @@ process_test_json() { local test_json=$4 local create_tarball=$5 - # move test.json.txt to the artifacts directory in order to simplify tarball creation - if [ ! -e $artifacts_dir/test.json.txt ]; then - mv -f $test_json $artifacts_dir/test.json.txt - test_json=$artifacts_dir/test.json.txt - fi - $testfilter -mode=strip < "$test_json" | $testfilter -mode=omit | $testfilter -mode=convert > "$artifacts_dir"/failures.txt failures_size=$(stat --format=%s "$artifacts_dir"/failures.txt) if [ $failures_size = 0 ]; then @@ -103,8 +97,6 @@ process_test_json() { rm -rf "$artifacts_dir"/full_output.txt fi - rm -f "$test_json" - # Some unit tests test automatic ballast creation. These ballasts can be # larger than the maximum artifact size. Remove any artifacts with the # EMERGENCY_BALLAST filename. diff --git a/build/teamcity/cockroach/nightlies/stress_impl.sh b/build/teamcity/cockroach/nightlies/stress_impl.sh index 1003027518e5..835547fb47f5 100755 --- a/build/teamcity/cockroach/nightlies/stress_impl.sh +++ b/build/teamcity/cockroach/nightlies/stress_impl.sh @@ -15,7 +15,6 @@ fi bazel build //pkg/cmd/bazci //pkg/cmd/github-post //pkg/cmd/testfilter --config=ci BAZEL_BIN=$(bazel info bazel-bin --config=ci) ARTIFACTS_DIR=/artifacts -GO_TEST_JSON_OUTPUT_FILE=$ARTIFACTS_DIR/test.json.txt # Query to list all affected tests. 
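The `process_test_json` hunks above stop relocating and deleting the per-test JSON file, since `stress_impl.sh` now derives a distinct `GO_TEST_JSON_OUTPUT_FILE` per target. That file carries `go test -json` (test2json) events, which `testfilter` and `github-post` consume; below is a minimal standalone sketch of decoding such a stream, assuming only the documented test2json event shape:

```go
// Minimal sketch of consuming a GO_TEST_JSON_OUTPUT_FILE stream, i.e. the
// `go test -json` (test2json) event format that testfilter and github-post
// process above. Field names follow the documented test2json event schema.
package main

import (
	"encoding/json"
	"fmt"
	"os"
	"time"
)

type testEvent struct {
	Time    time.Time `json:"Time"`
	Action  string    `json:"Action"` // e.g. "run", "output", "pass", "fail"
	Package string    `json:"Package"`
	Test    string    `json:"Test,omitempty"`
	Output  string    `json:"Output,omitempty"`
}

func main() {
	dec := json.NewDecoder(os.Stdin)
	for dec.More() {
		var ev testEvent
		if err := dec.Decode(&ev); err != nil {
			fmt.Fprintln(os.Stderr, "bad event:", err)
			return
		}
		if ev.Action == "fail" && ev.Test != "" {
			fmt.Printf("FAILED: %s.%s\n", ev.Package, ev.Test)
		}
	}
}
```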
PKG=${PKG#"./"} @@ -34,6 +33,7 @@ do continue fi exit_status=0 + GO_TEST_JSON_OUTPUT_FILE=$ARTIFACTS_DIR/$(echo "$test" | cut -d: -f2).test.json.txt $BAZEL_BIN/pkg/cmd/bazci/bazci_/bazci --config=ci test "$test" -- \ --test_env=COCKROACH_NIGHTLY_STRESS=true \ --test_env=GO_TEST_JSON_OUTPUT_FILE=$GO_TEST_JSON_OUTPUT_FILE \ diff --git a/build/toolchains/BUILD.bazel b/build/toolchains/BUILD.bazel index eb7b0673e23f..013d8da5b60b 100644 --- a/build/toolchains/BUILD.bazel +++ b/build/toolchains/BUILD.bazel @@ -8,6 +8,9 @@ toolchain( "@platforms//os:linux", "@platforms//cpu:x86_64", ], + target_settings = [ + ":cross", + ], toolchain = "@toolchain_cross_x86_64-unknown-linux-gnu//:toolchain", toolchain_type = "@bazel_tools//tools/cpp:toolchain_type", ) @@ -30,6 +33,9 @@ toolchain( "@platforms//os:windows", "@platforms//cpu:x86_64", ], + target_settings = [ + ":cross", + ], toolchain = "@toolchain_cross_x86_64-w64-mingw32//:toolchain", toolchain_type = "@bazel_tools//tools/cpp:toolchain_type", ) @@ -52,6 +58,9 @@ toolchain( "@platforms//os:macos", "@platforms//cpu:x86_64", ], + target_settings = [ + ":cross", + ], toolchain = "@toolchain_cross_x86_64-apple-darwin19//:toolchain", toolchain_type = "@bazel_tools//tools/cpp:toolchain_type", ) @@ -74,6 +83,9 @@ toolchain( "@platforms//os:linux", "@platforms//cpu:arm64", ], + target_settings = [ + ":cross", + ], toolchain = "@toolchain_cross_aarch64-unknown-linux-gnu//:toolchain", toolchain_type = "@bazel_tools//tools/cpp:toolchain_type", ) @@ -138,6 +150,13 @@ config_setting( }, ) +config_setting( + name = "cross", + define_values = { + "cockroach_cross": "y", + }, +) + config_setting( name = "is_cross_macos", constraint_values = [ diff --git a/docs/RFCS/20211106_multitenant_cluster_settings.md b/docs/RFCS/20211106_multitenant_cluster_settings.md index f6d3875d6155..9977252bed8a 100644 --- a/docs/RFCS/20211106_multitenant_cluster_settings.md +++ b/docs/RFCS/20211106_multitenant_cluster_settings.md @@ -215,11 +215,11 @@ following guidelines: - control settings relevant to tenant-specific internal implementation (like tenant throttling) that we want to be able to control per-tenant should be - `system`. + `tenant-ro`. - - when in doubt the first choice to consider should be `tenant-ro`. + - when in doubt the first choice to consider should be `tenant-rw`. - - `System` should be used with caution - we have to be sure that there is no + - `system` should be used with caution - we have to be sure that there is no internal code running on the tenant that needs to consult them. We fully hide `system` settings from non-system tenants. The cluster settings diff --git a/pkg/bench/rttanalysis/validate_benchmark_data.go b/pkg/bench/rttanalysis/validate_benchmark_data.go index fa0fea692149..16537921fce4 100644 --- a/pkg/bench/rttanalysis/validate_benchmark_data.go +++ b/pkg/bench/rttanalysis/validate_benchmark_data.go @@ -319,7 +319,10 @@ func (b benchmarkExpectations) find(name string) (benchmarkExpectation, bool) { } func (e benchmarkExpectation) matches(roundTrips int) bool { - return e.min <= roundTrips && roundTrips <= e.max + // Either the value falls within the expected range, or + return (e.min <= roundTrips && roundTrips <= e.max) || + // the expectation isn't a range, so give it a leeway of one. 
+ e.min == e.max && (roundTrips == e.min-1 || roundTrips == e.min+1) } func (e benchmarkExpectation) String() string { diff --git a/pkg/ccl/backupccl/backup_planning.go b/pkg/ccl/backupccl/backup_planning.go index 9f8903f3d849..f3b56a12613b 100644 --- a/pkg/ccl/backupccl/backup_planning.go +++ b/pkg/ccl/backupccl/backup_planning.go @@ -82,6 +82,7 @@ var _ cloud.KMSEnv = &backupKMSEnv{} // featureBackupEnabled is used to enable and disable the BACKUP feature. var featureBackupEnabled = settings.RegisterBoolSetting( + settings.TenantWritable, "feature.backup.enabled", "set to true to enable backups, false to disable; default is true", featureflag.FeatureFlagEnabledDefault, diff --git a/pkg/ccl/backupccl/backup_processor.go b/pkg/ccl/backupccl/backup_processor.go index fe179f68e419..4729bb23d118 100644 --- a/pkg/ccl/backupccl/backup_processor.go +++ b/pkg/ccl/backupccl/backup_processor.go @@ -50,35 +50,41 @@ var backupOutputTypes = []*types.T{} var ( useTBI = settings.RegisterBoolSetting( + settings.TenantWritable, "kv.bulk_io_write.experimental_incremental_export_enabled", "use experimental time-bound file filter when exporting in BACKUP", true, ) priorityAfter = settings.RegisterDurationSetting( + settings.TenantWritable, "bulkio.backup.read_with_priority_after", "amount of time since the read-as-of time above which a BACKUP should use priority when retrying reads", time.Minute, settings.NonNegativeDuration, ).WithPublic() delayPerAttmpt = settings.RegisterDurationSetting( + settings.TenantWritable, "bulkio.backup.read_retry_delay", "amount of time since the read-as-of time, per-prior attempt, to wait before making another attempt", time.Second*5, settings.NonNegativeDuration, ) timeoutPerAttempt = settings.RegisterDurationSetting( + settings.TenantWritable, "bulkio.backup.read_timeout", "amount of time after which a read attempt is considered timed out, which causes the backup to fail", time.Minute*5, settings.NonNegativeDuration, ).WithPublic() targetFileSize = settings.RegisterByteSizeSetting( + settings.TenantWritable, "bulkio.backup.file_size", "target size for individual data files produced during BACKUP", 128<<20, ).WithPublic() smallFileBuffer = settings.RegisterByteSizeSetting( + settings.TenantWritable, "bulkio.backup.merge_file_buffer_size", "size limit used when buffering backup files before merging them", 16<<20, @@ -86,6 +92,7 @@ var ( ) splitKeysOnTimestamps = settings.RegisterBoolSetting( + settings.TenantWritable, "bulkio.backup.split_keys_on_timestamps", "split backup data on timestamps when writing revision history", false, diff --git a/pkg/ccl/backupccl/create_scheduled_backup.go b/pkg/ccl/backupccl/create_scheduled_backup.go index b800e3d1e85b..14a1a2f9f5b8 100644 --- a/pkg/ccl/backupccl/create_scheduled_backup.go +++ b/pkg/ccl/backupccl/create_scheduled_backup.go @@ -61,6 +61,7 @@ var scheduledBackupOptionExpectValues = map[string]sql.KVStringOptValidate{ // scheduledBackupGCProtectionEnabled is used to enable and disable the chaining // of protected timestamps amongst scheduled backups. 
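The change repeated across the remaining Go hunks is visible above: every `settings.Register*` helper now takes a settings class (here `settings.TenantWritable`) as its leading argument. A standalone toy mirroring that registration shape; the registry below is a sketch, not CockroachDB's `settings` package, and the `TenantReadOnly` name is assumed from the RFC's `tenant-ro` class:

```go
// Toy mirror of the registration shape above; this registry is a sketch,
// not CockroachDB's settings package.
package main

import "fmt"

type Class int

const (
	SystemOnly     Class = iota
	TenantReadOnly // assumed name for the RFC's "tenant-ro" class
	TenantWritable
)

type BoolSetting struct {
	class Class
	key   string
	desc  string
	def   bool
}

// Class mirrors the accessor the pkg/cli/gen.go hunk further below switches to.
func (s *BoolSetting) Class() Class { return s.class }

var registry []*BoolSetting

func RegisterBoolSetting(class Class, key, desc string, def bool) *BoolSetting {
	s := &BoolSetting{class: class, key: key, desc: desc, def: def}
	registry = append(registry, s)
	return s
}

var featureBackupEnabled = RegisterBoolSetting(
	TenantWritable,
	"feature.backup.enabled",
	"set to true to enable backups, false to disable; default is true",
	true,
)

func main() {
	// e.g. filter out system-only settings, as the gen.go hunk below does.
	fmt.Println(featureBackupEnabled.Class() == TenantWritable) // true
}
```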
var scheduledBackupGCProtectionEnabled = settings.RegisterBoolSetting( + settings.TenantWritable, "schedules.backup.gc_protection.enabled", "enable chaining of GC protection across backups run as part of a schedule; default is false", false, /* defaultValue */ diff --git a/pkg/ccl/backupccl/restore_data_processor.go b/pkg/ccl/backupccl/restore_data_processor.go index 7c8c6e216831..f61a2e7b3dca 100644 --- a/pkg/ccl/backupccl/restore_data_processor.go +++ b/pkg/ccl/backupccl/restore_data_processor.go @@ -94,6 +94,7 @@ var defaultNumWorkers = util.ConstantWithMetamorphicTestRange( // The maximum is not enforced since if the maximum is reduced in the future that // may cause the cluster setting to fail. var numRestoreWorkers = settings.RegisterIntSetting( + settings.TenantWritable, "kv.bulk_io_write.restore_node_concurrency", fmt.Sprintf("the number of workers processing a restore per job per node; maximum %d", maxConcurrentRestoreWorkers), diff --git a/pkg/ccl/backupccl/restore_planning.go b/pkg/ccl/backupccl/restore_planning.go index 1508fa0371cb..b122b1f54687 100644 --- a/pkg/ccl/backupccl/restore_planning.go +++ b/pkg/ccl/backupccl/restore_planning.go @@ -84,6 +84,7 @@ var allowedDebugPauseOnValues = map[string]struct{}{ // featureRestoreEnabled is used to enable and disable the RESTORE feature. var featureRestoreEnabled = settings.RegisterBoolSetting( + settings.TenantWritable, "feature.restore.enabled", "set to true to enable restore, false to disable; default is true", featureflag.FeatureFlagEnabledDefault, diff --git a/pkg/ccl/changefeedccl/changefeed_stmt.go b/pkg/ccl/changefeedccl/changefeed_stmt.go index 4dd24895c3b0..3cef5eb5b3fb 100644 --- a/pkg/ccl/changefeedccl/changefeed_stmt.go +++ b/pkg/ccl/changefeedccl/changefeed_stmt.go @@ -60,6 +60,7 @@ import ( // featureChangefeedEnabled is used to enable and disable the CHANGEFEED feature. var featureChangefeedEnabled = settings.RegisterBoolSetting( + settings.TenantWritable, "feature.changefeed.enabled", "set to true to enable changefeeds, false to disable; default is true", featureflag.FeatureFlagEnabledDefault, diff --git a/pkg/ccl/changefeedccl/changefeedbase/settings.go b/pkg/ccl/changefeedccl/changefeedbase/settings.go index e0aa0b08d0f4..4651dac53413 100644 --- a/pkg/ccl/changefeedccl/changefeedbase/settings.go +++ b/pkg/ccl/changefeedccl/changefeedbase/settings.go @@ -22,6 +22,7 @@ import ( // NB: The more generic name of this setting precedes its current // interpretation. It used to control additional polling rates. var TableDescriptorPollInterval = settings.RegisterDurationSetting( + settings.TenantWritable, "changefeed.experimental_poll_interval", "polling interval for the table descriptors", 1*time.Second, @@ -43,6 +44,7 @@ func TestingSetDefaultMinCheckpointFrequency(f time.Duration) func() { // PerChangefeedMemLimit controls how much data can be buffered by // a single changefeed. var PerChangefeedMemLimit = settings.RegisterByteSizeSetting( + settings.TenantWritable, "changefeed.memory.per_changefeed_limit", "controls amount of data that can be buffered per changefeed", 1<<30, @@ -50,6 +52,7 @@ var PerChangefeedMemLimit = settings.RegisterByteSizeSetting( // SlowSpanLogThreshold controls when we will log slow spans. 
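The `numRestoreWorkers` comment above notes that the maximum is deliberately not enforced at registration, so lowering it in a future release cannot invalidate an already-stored value. A hedged sketch of the resulting clamp-at-use pattern; the bound and helper are illustrative:

```go
// Sketch of the clamp-at-use pattern: the registered maximum for
// kv.bulk_io_write.restore_node_concurrency is advisory, so a stored value
// above a future, lower maximum can never make the setting fail.
package main

import "fmt"

const maxConcurrentRestoreWorkers = 32 // illustrative bound

func effectiveWorkers(configured int64) int {
	if configured < 1 {
		return 1
	}
	if configured > maxConcurrentRestoreWorkers {
		return maxConcurrentRestoreWorkers // clamp silently at use time
	}
	return int(configured)
}

func main() {
	fmt.Println(effectiveWorkers(8))   // 8
	fmt.Println(effectiveWorkers(500)) // 32, even if 500 was stored before the max shrank
}
```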
var SlowSpanLogThreshold = settings.RegisterDurationSetting( + settings.TenantWritable, "changefeed.slow_span_log_threshold", "a changefeed will log spans with resolved timestamps this far behind the current wall-clock time; if 0, a default value is calculated based on other cluster settings", 0, @@ -58,6 +61,7 @@ var SlowSpanLogThreshold = settings.RegisterDurationSetting( // FrontierCheckpointFrequency controls the frequency of frontier checkpoints. var FrontierCheckpointFrequency = settings.RegisterDurationSetting( + settings.TenantWritable, "changefeed.frontier_checkpoint_frequency", "controls the frequency with which span level checkpoints will be written; if 0, disabled.", 10*time.Minute, @@ -78,6 +82,7 @@ var FrontierCheckpointFrequency = settings.RegisterDurationSetting( // Therefore, we should write at most 6 MB of checkpoint/hour; OR, based on the default // FrontierCheckpointFrequency setting, 1 MB per checkpoint. var FrontierCheckpointMaxBytes = settings.RegisterByteSizeSetting( + settings.TenantWritable, "changefeed.frontier_checkpoint_max_bytes", "controls the maximum size of the checkpoint as a total size of key bytes", 1<<20, @@ -87,6 +92,7 @@ var FrontierCheckpointMaxBytes = settings.RegisterByteSizeSetting( // Scan requests are issued when changefeed performs the backfill. // If set to 0, a reasonable default will be chosen. var ScanRequestLimit = settings.RegisterIntSetting( + settings.TenantWritable, "changefeed.backfill.concurrent_scan_requests", "number of concurrent scan requests per node issued during a backfill", 0, @@ -112,6 +118,7 @@ type SinkThrottleConfig struct { // NodeSinkThrottleConfig is the node wide throttling configuration for changefeeds. var NodeSinkThrottleConfig = func() *settings.StringSetting { s := settings.RegisterValidatedStringSetting( + settings.TenantWritable, "changefeed.node_throttle_config", "specifies node level throttling configuration for all changefeeeds", "", @@ -133,6 +140,7 @@ func validateSinkThrottleConfig(values *settings.Values, configStr string) error // MinHighWaterMarkCheckpointAdvance specifies the minimum amount of time the // changefeed high water mark must advance for it to be eligible for checkpointing. var MinHighWaterMarkCheckpointAdvance = settings.RegisterDurationSetting( + settings.TenantWritable, "changefeed.min_highwater_advance", "minimum amount of time the changefeed high water mark must advance "+ "for it to be eligible for checkpointing; Default of 0 will checkpoint every time frontier "+ @@ -148,6 +156,7 @@ var MinHighWaterMarkCheckpointAdvance = settings.RegisterDurationSetting( // with complex schemes to accurately measure and adjust current memory usage, // we'll request the amount of memory multiplied by this fudge factor. var EventMemoryMultiplier = settings.RegisterFloatSetting( + settings.TenantWritable, "changefeed.event_memory_multiplier", "the amount of memory required to process an event is multiplied by this factor", 3, diff --git a/pkg/ccl/importccl/import_planning.go b/pkg/ccl/importccl/import_planning.go index 01f6ac0a77e4..525b371bc7c3 100644 --- a/pkg/ccl/importccl/import_planning.go +++ b/pkg/ccl/importccl/import_planning.go @@ -191,6 +191,7 @@ var allowedIntoFormats = map[string]struct{}{ // featureImportEnabled is used to enable and disable the IMPORT feature. 
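`changefeed.node_throttle_config` above is registered through `RegisterValidatedStringSetting` with `validateSinkThrottleConfig`, meaning the string is vetted before it is ever stored. A standalone sketch of such a validator; the JSON field names are assumptions, not the real `SinkThrottleConfig` schema, and the `*settings.Values` parameter from the real signature is omitted:

```go
// Sketch of the validated-string-setting pattern: registration pairs the
// value with a validator that must accept the contents before they persist.
package main

import (
	"encoding/json"
	"fmt"
)

type sinkThrottleConfig struct {
	MessageRate float64 `json:"message_rate"`
	ByteRate    float64 `json:"byte_rate"`
}

func validateSinkThrottleConfig(configStr string) error {
	if configStr == "" {
		return nil // empty means "no throttling", always valid
	}
	var cfg sinkThrottleConfig
	if err := json.Unmarshal([]byte(configStr), &cfg); err != nil {
		return fmt.Errorf("invalid throttle config: %w", err)
	}
	if cfg.MessageRate < 0 || cfg.ByteRate < 0 {
		return fmt.Errorf("throttle rates must be non-negative")
	}
	return nil
}

func main() {
	fmt.Println(validateSinkThrottleConfig(`{"message_rate": 100}`)) // <nil>
	fmt.Println(validateSinkThrottleConfig(`{"message_rate": -1}`))  // error
}
```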
var featureImportEnabled = settings.RegisterBoolSetting( + settings.TenantWritable, "feature.import.enabled", "set to true to enable imports, false to disable; default is true", featureflag.FeatureFlagEnabledDefault, diff --git a/pkg/ccl/importccl/import_processor.go b/pkg/ccl/importccl/import_processor.go index feeed6464011..b6d3b34f3eea 100644 --- a/pkg/ccl/importccl/import_processor.go +++ b/pkg/ccl/importccl/import_processor.go @@ -47,6 +47,7 @@ const readImportDataProcessorName = "readImportDataProcessor" var importPKAdderBufferSize = func() *settings.ByteSizeSetting { s := settings.RegisterByteSizeSetting( + settings.TenantWritable, "kv.bulk_ingest.pk_buffer_size", "the initial size of the BulkAdder buffer handling primary index imports", 32<<20, @@ -56,6 +57,7 @@ var importPKAdderBufferSize = func() *settings.ByteSizeSetting { var importPKAdderMaxBufferSize = func() *settings.ByteSizeSetting { s := settings.RegisterByteSizeSetting( + settings.TenantWritable, "kv.bulk_ingest.max_pk_buffer_size", "the maximum size of the BulkAdder buffer handling primary index imports", 128<<20, @@ -65,6 +67,7 @@ var importPKAdderMaxBufferSize = func() *settings.ByteSizeSetting { var importIndexAdderBufferSize = func() *settings.ByteSizeSetting { s := settings.RegisterByteSizeSetting( + settings.TenantWritable, "kv.bulk_ingest.index_buffer_size", "the initial size of the BulkAdder buffer handling secondary index imports", 32<<20, @@ -74,6 +77,7 @@ var importIndexAdderBufferSize = func() *settings.ByteSizeSetting { var importIndexAdderMaxBufferSize = func() *settings.ByteSizeSetting { s := settings.RegisterByteSizeSetting( + settings.TenantWritable, "kv.bulk_ingest.max_index_buffer_size", "the maximum size of the BulkAdder buffer handling secondary index imports", 512<<20, @@ -83,6 +87,7 @@ var importIndexAdderMaxBufferSize = func() *settings.ByteSizeSetting { var importBufferIncrementSize = func() *settings.ByteSizeSetting { s := settings.RegisterByteSizeSetting( + settings.TenantWritable, "kv.bulk_ingest.buffer_increment", "the size by which the BulkAdder attempts to grow its buffer before flushing", 32<<20, diff --git a/pkg/ccl/kvccl/kvfollowerreadsccl/followerreads.go b/pkg/ccl/kvccl/kvfollowerreadsccl/followerreads.go index f4d748d3f265..994d7575ed54 100644 --- a/pkg/ccl/kvccl/kvfollowerreadsccl/followerreads.go +++ b/pkg/ccl/kvccl/kvfollowerreadsccl/followerreads.go @@ -35,6 +35,7 @@ import ( // measure of how long closed timestamp updates are supposed to take from the // leaseholder to the followers. var ClosedTimestampPropagationSlack = settings.RegisterDurationSetting( + settings.TenantWritable, "kv.closed_timestamp.propagation_slack", "a conservative estimate of the amount of time expect for closed timestamps to "+ "propagate from a leaseholder to followers. This is taken into account by "+ diff --git a/pkg/ccl/multitenantccl/tenantcostclient/tenant_side.go b/pkg/ccl/multitenantccl/tenantcostclient/tenant_side.go index d8f797b2a427..a8c274370d65 100644 --- a/pkg/ccl/multitenantccl/tenantcostclient/tenant_side.go +++ b/pkg/ccl/multitenantccl/tenantcostclient/tenant_side.go @@ -31,6 +31,7 @@ import ( // TargetPeriodSetting is exported for testing purposes. 
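The `import_processor.go` hunks above use the register-then-configure idiom: an immediately invoked func literal registers the setting, applies further options, and returns it, so the package-level var holds the finished object. A toy illustration; the `SetVisibility` option here is a stand-in, not the real API:

```go
// Toy sketch of the IIFE register-then-configure idiom from the hunks above.
package main

import "fmt"

type ByteSizeSetting struct {
	key    string
	def    int64
	public bool
}

func RegisterByteSizeSetting(key string, def int64) *ByteSizeSetting {
	return &ByteSizeSetting{key: key, def: def}
}

// SetVisibility is a stand-in for whatever extra configuration runs between
// registration and binding the package-level var.
func (s *ByteSizeSetting) SetVisibility(public bool) { s.public = public }

var importPKAdderBufferSize = func() *ByteSizeSetting {
	s := RegisterByteSizeSetting("kv.bulk_ingest.pk_buffer_size", 32<<20)
	s.SetVisibility(true) // extra configuration before the var is bound
	return s
}()

func main() {
	fmt.Println(importPKAdderBufferSize.key, importPKAdderBufferSize.public)
}
```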
var TargetPeriodSetting = settings.RegisterDurationSetting( + settings.TenantWritable, "tenant_cost_control_period", "target duration between token bucket requests from tenants (requires restart)", 10*time.Second, @@ -39,6 +40,7 @@ var TargetPeriodSetting = settings.RegisterDurationSetting( // CPUUsageAllowance is exported for testing purposes. var CPUUsageAllowance = settings.RegisterDurationSetting( + settings.TenantWritable, "tenant_cpu_usage_allowance", "this much CPU usage per second is considered background usage and "+ "doesn't contribute to consumption; for example, if it is set to 10ms, "+ diff --git a/pkg/ccl/multitenantccl/tenantcostserver/server.go b/pkg/ccl/multitenantccl/tenantcostserver/server.go index aeed475fde97..fdf6c839adf4 100644 --- a/pkg/ccl/multitenantccl/tenantcostserver/server.go +++ b/pkg/ccl/multitenantccl/tenantcostserver/server.go @@ -32,6 +32,7 @@ type instance struct { // Note: the "four" in the description comes from // tenantcostclient.extendedReportingPeriodFactor. var instanceInactivity = settings.RegisterDurationSetting( + settings.TenantWritable, "tenant_usage_instance_inactivity", "instances that have not reported consumption for longer than this value are cleaned up; "+ "should be at least four times higher than the tenant_cost_control_period of any tenant", diff --git a/pkg/ccl/oidcccl/settings.go b/pkg/ccl/oidcccl/settings.go index c4484dd12d26..fbad1cf36bfc 100644 --- a/pkg/ccl/oidcccl/settings.go +++ b/pkg/ccl/oidcccl/settings.go @@ -38,6 +38,7 @@ const ( // OIDCEnabled enables or disabled OIDC login for the DB Console. var OIDCEnabled = func() *settings.BoolSetting { s := settings.RegisterBoolSetting( + settings.TenantWritable, OIDCEnabledSettingName, "enables or disabled OIDC login for the DB Console", false, @@ -49,6 +50,7 @@ var OIDCEnabled = func() *settings.BoolSetting { // OIDCClientID is the OIDC client id. var OIDCClientID = func() *settings.StringSetting { s := settings.RegisterStringSetting( + settings.TenantWritable, OIDCClientIDSettingName, "sets OIDC client id", "", @@ -60,6 +62,7 @@ var OIDCClientID = func() *settings.StringSetting { // OIDCClientSecret is the OIDC client secret. var OIDCClientSecret = func() *settings.StringSetting { s := settings.RegisterStringSetting( + settings.TenantWritable, OIDCClientSecretSettingName, "sets OIDC client secret", "", @@ -170,6 +173,7 @@ func validateOIDCRedirectURL(values *settings.Values, s string) error { // will use the required `default_url` callback URL. var OIDCRedirectURL = func() *settings.StringSetting { s := settings.RegisterValidatedStringSetting( + settings.TenantWritable, OIDCRedirectURLSettingName, "sets OIDC redirect URL via a URL string or a JSON string containing a required "+ "`redirect_urls` key with an object that maps from region keys to URL strings "+ @@ -186,6 +190,7 @@ var OIDCRedirectURL = func() *settings.StringSetting { // provider. var OIDCProviderURL = func() *settings.StringSetting { s := settings.RegisterValidatedStringSetting( + settings.TenantWritable, OIDCProviderURLSettingName, "sets OIDC provider URL ({provider_url}/.well-known/openid-configuration must resolve)", "", @@ -204,6 +209,7 @@ var OIDCProviderURL = func() *settings.StringSetting { // OIDCScopes contains the list of scopes to request from the auth provider. 
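`tenant_cpu_usage_allowance` above describes a per-second CPU allowance that "doesn't contribute to consumption". A worked sketch under the reading that the allowance is simply subtracted from measured usage; that accounting is an assumption:

```go
// Worked sketch of one plausible tenant_cpu_usage_allowance accounting:
// per-second CPU up to the allowance counts as background work and is
// subtracted before usage is reported for cost control.
package main

import (
	"fmt"
	"time"
)

func billableCPU(used, allowance time.Duration) time.Duration {
	if used <= allowance {
		return 0 // entirely background usage
	}
	return used - allowance
}

func main() {
	allowance := 10 * time.Millisecond // the example from the setting's description
	fmt.Println(billableCPU(4*time.Millisecond, allowance))  // 0s
	fmt.Println(billableCPU(25*time.Millisecond, allowance)) // 15ms
}
```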
var OIDCScopes = func() *settings.StringSetting { s := settings.RegisterValidatedStringSetting( + settings.TenantWritable, OIDCScopesSettingName, "sets OIDC scopes to include with authentication request "+ "(space delimited list of strings, required to start with `openid`)", @@ -223,6 +229,7 @@ var OIDCScopes = func() *settings.StringSetting { // OIDCClaimJSONKey is the key of the claim to extract from the OIDC id_token. var OIDCClaimJSONKey = func() *settings.StringSetting { s := settings.RegisterStringSetting( + settings.TenantWritable, OIDCClaimJSONKeySettingName, "sets JSON key of principal to extract from payload after OIDC authentication completes "+ "(usually email or sid)", @@ -235,6 +242,7 @@ var OIDCClaimJSONKey = func() *settings.StringSetting { // claim value to conver it to a DB principal. var OIDCPrincipalRegex = func() *settings.StringSetting { s := settings.RegisterValidatedStringSetting( + settings.TenantWritable, OIDCPrincipalRegexSettingName, "regular expression to apply to extracted principal (see claim_json_key setting) to "+ "translate to SQL user (golang regex format, must include 1 grouping to extract)", @@ -255,6 +263,7 @@ var OIDCPrincipalRegex = func() *settings.StringSetting { // login with OIDC. var OIDCButtonText = func() *settings.StringSetting { s := settings.RegisterStringSetting( + settings.TenantWritable, OIDCButtonTextSettingName, "text to show on button on DB Console login page to login with your OIDC provider "+ "(only shown if OIDC is enabled)", @@ -267,6 +276,7 @@ var OIDCButtonText = func() *settings.StringSetting { // the DB Console. var OIDCAutoLogin = func() *settings.BoolSetting { s := settings.RegisterBoolSetting( + settings.TenantWritable, OIDCAutoLoginSettingName, "if true, logged-out visitors to the DB Console will be "+ "automatically redirected to the OIDC login endpoint", diff --git a/pkg/ccl/storageccl/external_sst_reader.go b/pkg/ccl/storageccl/external_sst_reader.go index e368b3b08b47..5c1818cb7fc7 100644 --- a/pkg/ccl/storageccl/external_sst_reader.go +++ b/pkg/ccl/storageccl/external_sst_reader.go @@ -26,12 +26,14 @@ import ( ) var remoteSSTs = settings.RegisterBoolSetting( + settings.TenantWritable, "kv.bulk_ingest.stream_external_ssts.enabled", "if enabled, external SSTables are iterated directly in some cases, rather than being downloaded entirely first", true, ) var remoteSSTSuffixCacheSize = settings.RegisterByteSizeSetting( + settings.TenantWritable, "kv.bulk_ingest.stream_external_ssts.suffix_cache_size", "size of suffix of remote SSTs to download and cache before reading from remote stream", 64<<10, diff --git a/pkg/ccl/storageccl/import.go b/pkg/ccl/storageccl/import.go index 27eb18d1d1a4..8fd8ce3623eb 100644 --- a/pkg/ccl/storageccl/import.go +++ b/pkg/ccl/storageccl/import.go @@ -21,6 +21,7 @@ import ( // payload in an AddSSTable request. var IngestBatchSize = func() *settings.ByteSizeSetting { s := settings.RegisterByteSizeSetting( + settings.TenantWritable, "kv.bulk_ingest.batch_size", "the maximum size of the payload in an AddSSTable request", 16<<20, diff --git a/pkg/ccl/streamingccl/settings.go b/pkg/ccl/streamingccl/settings.go index 384a9a27295e..de558b2541ac 100644 --- a/pkg/ccl/streamingccl/settings.go +++ b/pkg/ccl/streamingccl/settings.go @@ -17,6 +17,7 @@ import ( // StreamReplicationStreamLivenessTrackFrequency controls frequency to check // the liveness of a streaming replication producer job. 
var StreamReplicationStreamLivenessTrackFrequency = settings.RegisterDurationSetting( + settings.TenantWritable, "stream_replication.stream_liveness_track_frequency", "controls how frequent we check for the liveness of a replication stream producer job", time.Minute, @@ -25,6 +26,7 @@ var StreamReplicationStreamLivenessTrackFrequency = settings.RegisterDurationSet // StreamReplicationJobLivenessTimeout controls how long we wait for to kill // an inactive producer job. var StreamReplicationJobLivenessTimeout = settings.RegisterDurationSetting( + settings.TenantWritable, "stream_replication.job_liveness_timeout", "controls how long we wait for to kill an inactive producer job", time.Minute, diff --git a/pkg/ccl/streamingccl/streamingest/stream_ingestion_frontier_processor.go b/pkg/ccl/streamingccl/streamingest/stream_ingestion_frontier_processor.go index 49933fe2df49..cace59a73015 100644 --- a/pkg/ccl/streamingccl/streamingest/stream_ingestion_frontier_processor.go +++ b/pkg/ccl/streamingccl/streamingest/stream_ingestion_frontier_processor.go @@ -33,6 +33,7 @@ import ( // PartitionProgressFrequency controls the frequency of partition progress checkopints. var PartitionProgressFrequency = settings.RegisterDurationSetting( + settings.TenantWritable, "streaming.partition_progress_frequency", "controls the frequency with which partitions update their progress; if 0, disabled.", 10*time.Second, diff --git a/pkg/ccl/streamingccl/streamingest/stream_ingestion_processor.go b/pkg/ccl/streamingccl/streamingest/stream_ingestion_processor.go index 07f8b14cd30f..f259d8c3929a 100644 --- a/pkg/ccl/streamingccl/streamingest/stream_ingestion_processor.go +++ b/pkg/ccl/streamingccl/streamingest/stream_ingestion_processor.go @@ -39,6 +39,7 @@ import ( ) var minimumFlushInterval = settings.RegisterPublicDurationSettingWithExplicitUnit( + settings.TenantWritable, "bulkio.stream_ingestion.minimum_flush_interval", "the minimum timestamp between flushes; flushes may still occur if internal buffers fill up", 5*time.Second, @@ -49,6 +50,7 @@ var minimumFlushInterval = settings.RegisterPublicDurationSettingWithExplicitUni // the system.jobs table to check whether the stream ingestion job has been // signaled to cutover. var cutoverSignalPollInterval = settings.RegisterDurationSetting( + settings.TenantWritable, "bulkio.stream_ingestion.cutover_signal_poll_interval", "the interval at which the stream ingestion job checks if it has been signaled to cutover", 30*time.Second, diff --git a/pkg/ccl/utilccl/license_check.go b/pkg/ccl/utilccl/license_check.go index 54785d31d525..7d0baed21823 100644 --- a/pkg/ccl/utilccl/license_check.go +++ b/pkg/ccl/utilccl/license_check.go @@ -34,6 +34,7 @@ import ( var enterpriseLicense = func() *settings.StringSetting { s := settings.RegisterValidatedStringSetting( + settings.TenantWritable, "enterprise.license", "the encoded cluster license", "", diff --git a/pkg/cli/cpuprofile.go b/pkg/cli/cpuprofile.go index 5737e70c17f2..ed1bac6e6ee4 100644 --- a/pkg/cli/cpuprofile.go +++ b/pkg/cli/cpuprofile.go @@ -30,6 +30,7 @@ import ( ) var maxCombinedCPUProfFileSize = settings.RegisterByteSizeSetting( + settings.TenantWritable, "server.cpu_profile.total_dump_size_limit", "maximum combined disk size of preserved CPU profiles", 128<<20, // 128MiB diff --git a/pkg/cli/gen.go b/pkg/cli/gen.go index 2eee67f9539d..47c8686b526c 100644 --- a/pkg/cli/gen.go +++ b/pkg/cli/gen.go @@ -217,7 +217,7 @@ Output the list of cluster settings known to this binary. 
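`server.cpu_profile.total_dump_size_limit` above caps the combined disk size of preserved CPU profiles. One plausible enforcement, assuming newest-first retention; the dump type and ordering are illustrative:

```go
// Sketch of enforcing a combined-size cap on preserved CPU profiles:
// keep the newest dumps whose total size stays under the budget.
package main

import "fmt"

type profileDump struct {
	name string
	size int64
}

// pruneOldest assumes dumps are sorted newest first and returns the prefix
// that fits in the combined budget.
func pruneOldest(dumps []profileDump, limit int64) []profileDump {
	var total int64
	for i, d := range dumps {
		if total+d.size > limit {
			return dumps[:i]
		}
		total += d.size
	}
	return dumps
}

func main() {
	dumps := []profileDump{{"cpuprof.3", 60 << 20}, {"cpuprof.2", 50 << 20}, {"cpuprof.1", 40 << 20}}
	kept := pruneOldest(dumps, 128<<20) // 128 MiB, the setting's default
	fmt.Println(len(kept))              // 2: the oldest dump would exceed the budget
}
```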
panic(fmt.Sprintf("could not find setting %q", name)) } - if excludeSystemSettings && setting.SystemOnly() { + if excludeSystemSettings && setting.Class() == settings.SystemOnly { continue } diff --git a/pkg/cloud/amazon/s3_storage.go b/pkg/cloud/amazon/s3_storage.go index af13f77e1bd1..8a52f30e4110 100644 --- a/pkg/cloud/amazon/s3_storage.go +++ b/pkg/cloud/amazon/s3_storage.go @@ -83,6 +83,7 @@ type s3Client struct { } var reuseSession = settings.RegisterBoolSetting( + settings.TenantWritable, "cloudstorage.s3.session_reuse.enabled", "persist the last opened s3 session and re-use it when opening a new session with the same arguments", true, diff --git a/pkg/cloud/cloud_io.go b/pkg/cloud/cloud_io.go index cd5edf36569c..580e1e9594dd 100644 --- a/pkg/cloud/cloud_io.go +++ b/pkg/cloud/cloud_io.go @@ -35,12 +35,14 @@ import ( // Timeout is a cluster setting used for cloud storage interactions. var Timeout = settings.RegisterDurationSetting( + settings.TenantWritable, "cloudstorage.timeout", "the timeout for import/export storage operations", 10*time.Minute, ).WithPublic() var httpCustomCA = settings.RegisterStringSetting( + settings.TenantWritable, "cloudstorage.http.custom_ca", "custom root CA (appended to system's default CAs) for verifying certificates when interacting with HTTPS storage", "", diff --git a/pkg/cloud/gcp/gcs_storage.go b/pkg/cloud/gcp/gcs_storage.go index ce3aeb30ce91..0decb8e04db7 100644 --- a/pkg/cloud/gcp/gcs_storage.go +++ b/pkg/cloud/gcp/gcs_storage.go @@ -47,6 +47,7 @@ const ( // gcsChunkingEnabled is used to enable and disable chunking of file upload to // Google Cloud Storage. var gcsChunkingEnabled = settings.RegisterBoolSetting( + settings.TenantWritable, "cloudstorage.gs.chunking.enabled", "enable chunking of file upload to Google Cloud Storage", true, /* default */ diff --git a/pkg/clusterversion/clusterversion_test.go b/pkg/clusterversion/clusterversion_test.go index b1365355f002..839818acea3c 100644 --- a/pkg/clusterversion/clusterversion_test.go +++ b/pkg/clusterversion/clusterversion_test.go @@ -29,6 +29,7 @@ func TestClusterVersionOnChange(t *testing.T) { cvs := &clusterVersionSetting{} cvs.VersionSetting = settings.MakeVersionSetting(cvs) settings.RegisterVersionSetting( + settings.TenantWritable, "dummy version key", "test description", &cvs.VersionSetting) diff --git a/pkg/clusterversion/setting.go b/pkg/clusterversion/setting.go index dc7a08197678..df641410c534 100644 --- a/pkg/clusterversion/setting.go +++ b/pkg/clusterversion/setting.go @@ -62,6 +62,7 @@ func registerClusterVersionSetting() *clusterVersionSetting { s := &clusterVersionSetting{} s.VersionSetting = settings.MakeVersionSetting(s) settings.RegisterVersionSetting( + settings.TenantWritable, KeyVersionSetting, "set the active cluster version in the format '<major>.<minor>'", // hide optional `-<unstable>`, &s.VersionSetting) @@ -241,6 +242,7 @@ var preserveDowngradeVersion = registerPreserveDowngradeVersionSetting() func registerPreserveDowngradeVersionSetting() *settings.StringSetting { s := settings.RegisterValidatedStringSetting( + settings.TenantWritable, "cluster.preserve_downgrade_option", "disable (automatic or manual) cluster version upgrade from the specified version until reset", "", diff --git a/pkg/cmd/github-pull-request-make/main.go b/pkg/cmd/github-pull-request-make/main.go index 5552dc0440c5..41c1b3af7849 100644 --- a/pkg/cmd/github-pull-request-make/main.go +++ b/pkg/cmd/github-pull-request-make/main.go @@ -294,6 +294,11 @@ func main() { } } args = append(args, "--") + if target == "stressrace" { +
args = append(args, "--config=race") + } else { + args = append(args, "--test_sharding_strategy=disabled") + } var filters []string for _, test := range pkg.tests { filters = append(filters, "^"+test+"$") @@ -304,11 +309,6 @@ func main() { // Give the entire test 1 more minute than the duration to wrap up. args = append(args, fmt.Sprintf("--test_timeout=%d", int((duration+1*time.Minute).Seconds()))) args = append(args, "--run_under", fmt.Sprintf("%s -stderr -maxfails 1 -maxtime %s -p %d", bazelStressTarget, duration, parallelism)) - if target == "stressrace" { - args = append(args, "--config=race") - } else { - args = append(args, "--test_sharding_strategy=disabled") - } // NB: bazci is expected to be put in `PATH` by the caller. cmd := exec.Command("bazci", args...) cmd.Stdout = os.Stdout diff --git a/pkg/jobs/config.go b/pkg/jobs/config.go index b1ece4c86d5e..d2c7f7d5a7bf 100644 --- a/pkg/jobs/config.go +++ b/pkg/jobs/config.go @@ -74,6 +74,7 @@ const ( var ( intervalBaseSetting = settings.RegisterFloatSetting( + settings.TenantWritable, intervalBaseSettingKey, "the base multiplier for other intervals such as adopt, cancel, and gc", defaultIntervalBase, @@ -81,6 +82,7 @@ var ( ) adoptIntervalSetting = settings.RegisterDurationSetting( + settings.TenantWritable, adoptIntervalSettingKey, "the interval at which a node (a) claims some of the pending jobs and "+ "(b) restart its already claimed jobs that are in running or reverting "+ @@ -90,6 +92,7 @@ var ( ) cancelIntervalSetting = settings.RegisterDurationSetting( + settings.TenantWritable, cancelIntervalSettingKey, "the interval at which a node cancels the jobs belonging to the known "+ "dead sessions", @@ -98,6 +101,7 @@ var ( ) gcIntervalSetting = settings.RegisterDurationSetting( + settings.TenantWritable, gcIntervalSettingKey, "the interval a node deletes expired job records that have exceeded their "+ "retention duration", @@ -106,6 +110,7 @@ var ( ) retentionTimeSetting = settings.RegisterDurationSetting( + settings.TenantWritable, retentionTimeSettingKey, "the amount of time to retain records for completed jobs before", defaultRetentionTime, @@ -113,6 +118,7 @@ var ( ).WithPublic() cancellationsUpdateLimitSetting = settings.RegisterIntSetting( + settings.TenantWritable, cancelUpdateLimitKey, "the number of jobs that can be updated when canceling jobs concurrently from dead sessions", defaultCancellationsUpdateLimit, @@ -120,6 +126,7 @@ var ( ) retryInitialDelaySetting = settings.RegisterDurationSetting( + settings.TenantWritable, retryInitialDelaySettingKey, "the starting duration of exponential-backoff delay"+ " to retry a job which encountered a retryable error or had its coordinator"+ @@ -129,6 +136,7 @@ var ( ) retryMaxDelaySetting = settings.RegisterDurationSetting( + settings.TenantWritable, retryMaxDelaySettingKey, "the maximum duration by which a job can be delayed to retry", defaultRetryMaxDelay, @@ -136,6 +144,7 @@ var ( ) executionErrorsMaxEntriesSetting = settings.RegisterIntSetting( + settings.TenantWritable, executionErrorsMaxEntriesKey, "the maximum number of retriable error entries which will be stored for introspection", defaultExecutionErrorsMaxEntries, @@ -143,6 +152,7 @@ var ( ) executionErrorsMaxEntrySize = settings.RegisterByteSizeSetting( + settings.TenantWritable, executionErrorsMaxEntrySizeKey, "the maximum byte size of individual error entries which will be stored"+ " for introspection", @@ -151,6 +161,7 @@ var ( ) debugPausepoints = settings.RegisterStringSetting( + settings.TenantWritable, 
debugPausePointsSettingKey, "the list, comma separated, of named pausepoints currently enabled for debugging", "", diff --git a/pkg/jobs/job_scheduler.go b/pkg/jobs/job_scheduler.go index af6879927220..7d819df05f03 100644 --- a/pkg/jobs/job_scheduler.go +++ b/pkg/jobs/job_scheduler.go @@ -385,18 +385,21 @@ func (s *jobScheduler) runDaemon(ctx context.Context, stopper *stop.Stopper) { } var schedulerEnabledSetting = settings.RegisterBoolSetting( + settings.TenantWritable, "jobs.scheduler.enabled", "enable/disable job scheduler", true, ) var schedulerPaceSetting = settings.RegisterDurationSetting( + settings.TenantWritable, "jobs.scheduler.pace", "how often to scan system.scheduled_jobs table", time.Minute, ) var schedulerMaxJobsPerIterationSetting = settings.RegisterIntSetting( + settings.TenantWritable, "jobs.scheduler.max_jobs_per_iteration", "how many schedules to start per iteration; setting to 0 turns off this limit", 10, diff --git a/pkg/jobs/jobs.go b/pkg/jobs/jobs.go index dac6e12927ea..d7e004090db8 100644 --- a/pkg/jobs/jobs.go +++ b/pkg/jobs/jobs.go @@ -49,6 +49,7 @@ const ( ) var traceableJobDumpTraceMode = settings.RegisterEnumSetting( + settings.TenantWritable, "jobs.trace.force_dump_mode", "determines the state in which all traceable jobs will dump their cluster wide, inflight, "+ "trace recordings. Traces may be dumped never, on fail, "+ diff --git a/pkg/kv/bulk/sst_batcher.go b/pkg/kv/bulk/sst_batcher.go index 5a5716e79bc8..affc8e75a023 100644 --- a/pkg/kv/bulk/sst_batcher.go +++ b/pkg/kv/bulk/sst_batcher.go @@ -33,12 +33,14 @@ import ( var ( tooSmallSSTSize = settings.RegisterByteSizeSetting( + settings.TenantWritable, "kv.bulk_io_write.small_write_size", "size below which a 'bulk' write will be performed as a normal write instead", 400*1<<10, // 400 Kib ) ingestDelay = settings.RegisterDurationSetting( + settings.TenantWritable, "bulkio.ingest.flush_delay", "amount of time to wait before sending a file to the KV/Storage layer to ingest", 0, diff --git a/pkg/kv/kvclient/kvcoord/dist_sender.go b/pkg/kv/kvclient/kvcoord/dist_sender.go index 5f87ac4d6241..ffeb413bd918 100644 --- a/pkg/kv/kvclient/kvcoord/dist_sender.go +++ b/pkg/kv/kvclient/kvcoord/dist_sender.go @@ -166,12 +166,14 @@ const ( ) var rangeDescriptorCacheSize = settings.RegisterIntSetting( + settings.TenantWritable, "kv.range_descriptor_cache.size", "maximum number of entries in the range descriptor cache", 1e6, ) var senderConcurrencyLimit = settings.RegisterIntSetting( + settings.TenantWritable, "kv.dist_sender.concurrency_limit", "maximum number of asynchronous send requests", max(defaultSenderConcurrency, int64(64*runtime.GOMAXPROCS(0))), diff --git a/pkg/kv/kvclient/kvcoord/txn_interceptor_committer.go b/pkg/kv/kvclient/kvcoord/txn_interceptor_committer.go index cd6c22723e82..7fc12771be54 100644 --- a/pkg/kv/kvclient/kvcoord/txn_interceptor_committer.go +++ b/pkg/kv/kvclient/kvcoord/txn_interceptor_committer.go @@ -25,6 +25,7 @@ import ( ) var parallelCommitsEnabled = settings.RegisterBoolSetting( + settings.TenantWritable, "kv.transaction.parallel_commits_enabled", "if enabled, transactional commits will be parallelized with transactional writes", true, diff --git a/pkg/kv/kvclient/kvcoord/txn_interceptor_pipeliner.go b/pkg/kv/kvclient/kvcoord/txn_interceptor_pipeliner.go index 16565de5d75c..3ce69beaf955 100644 --- a/pkg/kv/kvclient/kvcoord/txn_interceptor_pipeliner.go +++ b/pkg/kv/kvclient/kvcoord/txn_interceptor_pipeliner.go @@ -30,11 +30,13 @@ import ( const txnPipelinerBtreeDegree = 32 var 
pipelinedWritesEnabled = settings.RegisterBoolSetting( + settings.TenantWritable, "kv.transaction.write_pipelining_enabled", "if enabled, transactional writes are pipelined through Raft consensus", true, ) var pipelinedWritesMaxBatchSize = settings.RegisterIntSetting( + settings.TenantWritable, "kv.transaction.write_pipelining_max_batch_size", "if non-zero, defines that maximum size batch that will be pipelined through Raft consensus", // NB: there is a tradeoff between the overhead of synchronously waiting for @@ -73,6 +75,7 @@ var pipelinedWritesMaxBatchSize = settings.RegisterIntSetting( // find matching intents. // See #54029 for more details. var trackedWritesMaxSize = settings.RegisterIntSetting( + settings.TenantWritable, "kv.transaction.max_intents_bytes", "maximum number of bytes used to track locks in transactions", 1<<22, /* 4 MB */ @@ -81,6 +84,7 @@ var trackedWritesMaxSize = settings.RegisterIntSetting( // rejectTxnOverTrackedWritesBudget dictates what happens when a txn exceeds // kv.transaction.max_intents_bytes. var rejectTxnOverTrackedWritesBudget = settings.RegisterBoolSetting( + settings.TenantWritable, "kv.transaction.reject_over_max_intents_budget.enabled", "if set, transactions that exceed their lock tracking budget (kv.transaction.max_intents_bytes) "+ "are rejected instead of having their lock spans imprecisely compressed", diff --git a/pkg/kv/kvclient/kvcoord/txn_interceptor_span_refresher.go b/pkg/kv/kvclient/kvcoord/txn_interceptor_span_refresher.go index bb8d9b4732f5..f2383a3b6a1d 100644 --- a/pkg/kv/kvclient/kvcoord/txn_interceptor_span_refresher.go +++ b/pkg/kv/kvclient/kvcoord/txn_interceptor_span_refresher.go @@ -36,6 +36,7 @@ const ( // on the coordinator during the lifetime of a transaction. Refresh spans // are used for SERIALIZABLE transactions to avoid client restarts. var MaxTxnRefreshSpansBytes = settings.RegisterIntSetting( + settings.TenantWritable, "kv.transaction.max_refresh_spans_bytes", "maximum number of bytes used to track refresh spans in serializable transactions", 256*1000, diff --git a/pkg/kv/kvclient/rangefeed/db_adapter.go b/pkg/kv/kvclient/rangefeed/db_adapter.go index d1f11df7d53a..832c9adc1824 100644 --- a/pkg/kv/kvclient/rangefeed/db_adapter.go +++ b/pkg/kv/kvclient/rangefeed/db_adapter.go @@ -39,6 +39,7 @@ type dbAdapter struct { var _ kvDB = (*dbAdapter)(nil) var maxScanParallelism = settings.RegisterIntSetting( + settings.TenantWritable, "kv.rangefeed.max_scan_parallelism", "maximum number of concurrent scan requests that can be issued during initial scan", 64, diff --git a/pkg/kv/kvprober/settings.go b/pkg/kv/kvprober/settings.go index 5cd23c190a73..2460cf290f71 100644 --- a/pkg/kv/kvprober/settings.go +++ b/pkg/kv/kvprober/settings.go @@ -22,6 +22,7 @@ import ( // ensures that kvprober will not be significantly affected if the cluster is // overloaded. var bypassAdmissionControl = settings.RegisterBoolSetting( + settings.TenantWritable, "kv.prober.bypass_admission_control.enabled", "set to bypass admission control queue for kvprober requests; "+ "note that dedicated clusters should have this set as users own capacity planning "+ @@ -30,6 +31,7 @@ var bypassAdmissionControl = settings.RegisterBoolSetting( ) var readEnabled = settings.RegisterBoolSetting( + settings.TenantWritable, "kv.prober.read.enabled", "whether the KV read prober is enabled", false) @@ -37,6 +39,7 @@ var readEnabled = settings.RegisterBoolSetting( // TODO(josh): Another option is for the cluster setting to be a QPS target // for the cluster as a whole. 
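`kv.transaction.max_intents_bytes` and `kv.transaction.reject_over_max_intents_budget.enabled` above pair a lock-tracking byte budget with a choice of behavior once it is exceeded: reject the transaction, or fall back to imprecisely compressed lock spans. A toy sketch of that branch; the span model and byte costs are assumptions:

```go
// Toy sketch of the budget check: once tracked lock spans exceed
// kv.transaction.max_intents_bytes, either reject (when
// reject_over_max_intents_budget.enabled is set) or coalesce imprecisely.
package main

import (
	"errors"
	"fmt"
)

type span struct{ start, end string }

func trackLock(spans []span, s span, used, budget int64, reject bool) ([]span, error) {
	cost := int64(len(s.start) + len(s.end))
	if used+cost > budget {
		if reject {
			return spans, errors.New("lock tracking budget exceeded")
		}
		// Imprecise compression: widen the last span instead of adding one.
		if n := len(spans); n > 0 {
			if spans[n-1].end < s.end {
				spans[n-1].end = s.end
			}
			return spans, nil
		}
	}
	return append(spans, s), nil
}

func main() {
	budget := int64(1 << 22) // 4 MB, the default above
	spans, err := trackLock([]span{{"a", "b"}}, span{"c", "d"}, budget-1, budget, true)
	fmt.Println(len(spans), err) // 1 lock tracking budget exceeded
}
```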
var readInterval = settings.RegisterDurationSetting( + settings.TenantWritable, "kv.prober.read.interval", "how often each node sends a read probe to the KV layer on average (jitter is added); "+ "note that a very slow read can block kvprober from sending additional probes; "+ @@ -49,6 +52,7 @@ var readInterval = settings.RegisterDurationSetting( }) var readTimeout = settings.RegisterDurationSetting( + settings.TenantWritable, "kv.prober.read.timeout", // Slow enough response times are not different than errors from the // perspective of the user. @@ -63,11 +67,13 @@ var readTimeout = settings.RegisterDurationSetting( }) var writeEnabled = settings.RegisterBoolSetting( + settings.TenantWritable, "kv.prober.write.enabled", "whether the KV write prober is enabled", false) var writeInterval = settings.RegisterDurationSetting( + settings.TenantWritable, "kv.prober.write.interval", "how often each node sends a write probe to the KV layer on average (jitter is added); "+ "note that a very slow read can block kvprober from sending additional probes; "+ @@ -80,6 +86,7 @@ var writeInterval = settings.RegisterDurationSetting( }) var writeTimeout = settings.RegisterDurationSetting( + settings.TenantWritable, "kv.prober.write.timeout", // Slow enough response times are not different than errors from the // perspective of the user. @@ -94,6 +101,7 @@ var writeTimeout = settings.RegisterDurationSetting( }) var scanMeta2Timeout = settings.RegisterDurationSetting( + settings.TenantWritable, "kv.prober.planner.scan_meta2.timeout", "timeout on scanning meta2 via db.Scan with max rows set to "+ "kv.prober.planner.num_steps_to_plan_at_once", @@ -105,6 +113,7 @@ var scanMeta2Timeout = settings.RegisterDurationSetting( }) var numStepsToPlanAtOnce = settings.RegisterIntSetting( + settings.TenantWritable, "kv.prober.planner.num_steps_to_plan_at_once", "the number of Steps to plan at once, where a Step is a decision on "+ "what range to probe; the order of the Steps is randomized within "+ diff --git a/pkg/kv/kvserver/allocator.go b/pkg/kv/kvserver/allocator.go index f16e63c0d96c..a70bb976b76f 100644 --- a/pkg/kv/kvserver/allocator.go +++ b/pkg/kv/kvserver/allocator.go @@ -59,6 +59,7 @@ var MinLeaseTransferStatsDuration = 30 * time.Second // via the new heuristic based on request load and latency or via the simpler // approach that purely seeks to balance the number of leases per node evenly. var enableLoadBasedLeaseRebalancing = settings.RegisterBoolSetting( + settings.TenantWritable, "kv.allocator.load_based_lease_rebalancing.enabled", "set to enable rebalancing of range leases based on load and latency", true, @@ -73,6 +74,7 @@ var enableLoadBasedLeaseRebalancing = settings.RegisterBoolSetting( // Setting this to 0 effectively disables load-based lease rebalancing, and // settings less than 0 are disallowed. var leaseRebalancingAggressiveness = settings.RegisterFloatSetting( + settings.TenantWritable, "kv.allocator.lease_rebalancing_aggressiveness", "set greater than 1.0 to rebalance leases toward load more aggressively, "+ "or between 0 and 1.0 to be more conservative about rebalancing leases", diff --git a/pkg/kv/kvserver/allocator_scorer.go b/pkg/kv/kvserver/allocator_scorer.go index ce5b79cfc856..4367e938e1e4 100644 --- a/pkg/kv/kvserver/allocator_scorer.go +++ b/pkg/kv/kvserver/allocator_scorer.go @@ -77,6 +77,7 @@ const ( // of ranges. 
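The kvprober intervals above are averages with jitter added, so probes across nodes do not synchronize. A minimal sketch, assuming a uniform ±25% jitter window (the width is illustrative):

```go
// Sketch of the "jitter is added" scheduling kv.prober.read.interval
// describes: each wait is the configured average scaled by a random factor.
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// jittered returns a duration uniformly distributed in [0.75d, 1.25d],
// an assumed jitter width for illustration.
func jittered(d time.Duration) time.Duration {
	f := 0.75 + 0.5*rand.Float64()
	return time.Duration(float64(d) * f)
}

func main() {
	for i := 0; i < 3; i++ {
		fmt.Println(jittered(time.Minute))
	}
}
```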
var rangeRebalanceThreshold = func() *settings.FloatSetting { s := settings.RegisterFloatSetting( + settings.TenantWritable, "kv.allocator.range_rebalance_threshold", "minimum fraction away from the mean a store's range count can be before it is considered overfull or underfull", 0.05, diff --git a/pkg/kv/kvserver/batcheval/cmd_export.go b/pkg/kv/kvserver/batcheval/cmd_export.go index 7e50896ebe8b..7eceeccf4058 100644 --- a/pkg/kv/kvserver/batcheval/cmd_export.go +++ b/pkg/kv/kvserver/batcheval/cmd_export.go @@ -34,6 +34,7 @@ const SSTTargetSizeSetting = "kv.bulk_sst.target_size" // ExportRequestTargetFileSize controls the target file size for SSTs created // during backups. var ExportRequestTargetFileSize = settings.RegisterByteSizeSetting( + settings.TenantWritable, SSTTargetSizeSetting, fmt.Sprintf("target size for SSTs emitted from export requests; "+ "export requests (i.e. BACKUP) may buffer up to the sum of %s and %s in memory", @@ -51,6 +52,7 @@ const MaxExportOverageSetting = "kv.bulk_sst.max_allowed_overage" // and an SST would exceed this size (due to large rows or large numbers of // versions), then the export will fail. var ExportRequestMaxAllowedFileSizeOverage = settings.RegisterByteSizeSetting( + settings.TenantWritable, MaxExportOverageSetting, fmt.Sprintf("if positive, allowed size in excess of target size for SSTs from export requests; "+ "export requests (i.e. BACKUP) may buffer up to the sum of %s and %s in memory", @@ -65,6 +67,7 @@ var ExportRequestMaxAllowedFileSizeOverage = settings.RegisterByteSizeSetting( // If request takes longer than this threshold it would stop and return already // collected data and allow caller to use resume span to continue. var exportRequestMaxIterationTime = settings.RegisterDurationSetting( + settings.TenantWritable, "kv.bulk_sst.max_request_time", "if set, limits amount of time spent in export requests; "+ "if export request can not finish within allocated time it will resume from the point it stopped in "+ diff --git a/pkg/kv/kvserver/batcheval/cmd_query_resolved_timestamp.go b/pkg/kv/kvserver/batcheval/cmd_query_resolved_timestamp.go index 07ca21fb2155..ecbf7b80754d 100644 --- a/pkg/kv/kvserver/batcheval/cmd_query_resolved_timestamp.go +++ b/pkg/kv/kvserver/batcheval/cmd_query_resolved_timestamp.go @@ -29,6 +29,7 @@ import ( // QueryResolvedTimestampIntentCleanupAge configures the minimum intent age that // QueryResolvedTimestamp requests will consider for async intent cleanup. var QueryResolvedTimestampIntentCleanupAge = settings.RegisterDurationSetting( + settings.TenantWritable, "kv.query_resolved_timestamp.intent_cleanup_age", "minimum intent age that QueryResolvedTimestamp requests will consider for async intent cleanup", 10*time.Second, diff --git a/pkg/kv/kvserver/closedts/setting.go b/pkg/kv/kvserver/closedts/setting.go index ce5c760cee0c..91230241af37 100644 --- a/pkg/kv/kvserver/closedts/setting.go +++ b/pkg/kv/kvserver/closedts/setting.go @@ -18,6 +18,7 @@ import ( // TargetDuration is the follower reads closed timestamp update target duration. var TargetDuration = settings.RegisterDurationSetting( + settings.TenantWritable, "kv.closed_timestamp.target_duration", "if nonzero, attempt to provide closed timestamp notifications for timestamps trailing cluster time by approximately this duration", 3*time.Second, @@ -26,6 +27,7 @@ var TargetDuration = settings.RegisterDurationSetting( // SideTransportCloseInterval determines the ClosedTimestampSender's frequency. 
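The export settings above define a soft cut point (`kv.bulk_sst.target_size`) and a hard ceiling (`kv.bulk_sst.max_allowed_overage`) beyond which the request fails when large rows or many versions overshoot. A sketch of how the two limits interact, per their descriptions:

```go
// Sketch of the target-size / allowed-overage interaction for export SSTs.
package main

import (
	"errors"
	"fmt"
)

func checkSSTSize(sstBytes, targetSize, maxOverage int64) (cut bool, err error) {
	if maxOverage > 0 && sstBytes > targetSize+maxOverage {
		return false, errors.New("export SST exceeds target size plus allowed overage")
	}
	return sstBytes >= targetSize, nil
}

func main() {
	fmt.Println(checkSSTSize(70<<20, 64<<20, 64<<20))  // true <nil>: cut the file here
	fmt.Println(checkSSTSize(200<<20, 64<<20, 64<<20)) // false + error: hard ceiling hit
}
```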
var SideTransportCloseInterval = settings.RegisterDurationSetting( + settings.TenantWritable, "kv.closed_timestamp.side_transport_interval", "the interval at which the closed-timestamp side-transport attempts to "+ "advance each range's closed timestamp; set to 0 to disable the side-transport", @@ -38,6 +40,7 @@ var SideTransportCloseInterval = settings.RegisterDurationSetting( // (see TargetForPolicy), if it is set to a non-zero value. Meant as an escape // hatch. var LeadForGlobalReadsOverride = settings.RegisterDurationSetting( + settings.TenantWritable, "kv.closed_timestamp.lead_for_global_reads_override", "if nonzero, overrides the lead time that global_read ranges use to publish closed timestamps", 0, diff --git a/pkg/kv/kvserver/concurrency/concurrency_manager.go b/pkg/kv/kvserver/concurrency/concurrency_manager.go index 1fa868c7b259..2089a348e7cd 100644 --- a/pkg/kv/kvserver/concurrency/concurrency_manager.go +++ b/pkg/kv/kvserver/concurrency/concurrency_manager.go @@ -56,6 +56,7 @@ import ( // utilization and runaway queuing for misbehaving clients, a role it is well // positioned to serve. var MaxLockWaitQueueLength = settings.RegisterIntSetting( + settings.TenantWritable, "kv.lock_table.maximum_lock_wait_queue_length", "the maximum length of a lock wait-queue that read-write requests are willing "+ "to enter and wait in. The setting can be used to ensure some level of quality-of-service "+ @@ -88,6 +89,7 @@ var MaxLockWaitQueueLength = settings.RegisterIntSetting( // discoveredCount > 100,000, caused by stats collection, where we definitely // want to avoid adding these locks to the lock table, if possible. var DiscoveredLocksThresholdToConsultFinalizedTxnCache = settings.RegisterIntSetting( + settings.TenantWritable, "kv.lock_table.discovered_locks_threshold_for_consulting_finalized_txn_cache", "the maximum number of discovered locks by a waiter, above which the finalized txn cache"+ "is consulted and resolvable locks are not added to the lock table -- this should be a small"+ diff --git a/pkg/kv/kvserver/concurrency/lock_table_waiter.go b/pkg/kv/kvserver/concurrency/lock_table_waiter.go index 9a77b137dab6..fa36287c96b0 100644 --- a/pkg/kv/kvserver/concurrency/lock_table_waiter.go +++ b/pkg/kv/kvserver/concurrency/lock_table_waiter.go @@ -36,6 +36,7 @@ import ( // LockTableLivenessPushDelay sets the delay before pushing in order to detect // coordinator failures of conflicting transactions. var LockTableLivenessPushDelay = settings.RegisterDurationSetting( + settings.TenantWritable, "kv.lock_table.coordinator_liveness_push_delay", "the delay before pushing in order to detect coordinator failures of conflicting transactions", // This is set to a short duration to ensure that we quickly detect failed @@ -67,6 +68,7 @@ var LockTableLivenessPushDelay = settings.RegisterDurationSetting( // LockTableDeadlockDetectionPushDelay sets the delay before pushing in order to // detect dependency cycles between transactions. 
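The closed-timestamp settings above describe the policy: ordinary ranges close timestamps trailing the clock by `kv.closed_timestamp.target_duration`, while `global_read` ranges close future timestamps by a lead time that the override setting pins when nonzero. A sketch of that selection; the derived lead value is a placeholder:

```go
// Sketch of closed-timestamp target selection per the descriptions above.
package main

import (
	"fmt"
	"time"
)

func closedTimestampTarget(now time.Time, target, leadOverride time.Duration, globalReads bool) time.Time {
	if !globalReads {
		return now.Add(-target)
	}
	lead := 800 * time.Millisecond // placeholder for the derived lead time
	if leadOverride > 0 {
		lead = leadOverride // the escape hatch wins when nonzero
	}
	return now.Add(lead)
}

func main() {
	now := time.Now()
	fmt.Println(closedTimestampTarget(now, 3*time.Second, 0, false).Before(now)) // true
	fmt.Println(closedTimestampTarget(now, 3*time.Second, 0, true).After(now))   // true
}
```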
var LockTableDeadlockDetectionPushDelay = settings.RegisterDurationSetting( + settings.TenantWritable, "kv.lock_table.deadlock_detection_push_delay", "the delay before pushing in order to detect dependency cycles between transactions", // This is set to a medium duration to ensure that deadlock caused by diff --git a/pkg/kv/kvserver/consistency_queue.go b/pkg/kv/kvserver/consistency_queue.go index 0e3af4753660..5f09ba9b3f3f 100644 --- a/pkg/kv/kvserver/consistency_queue.go +++ b/pkg/kv/kvserver/consistency_queue.go @@ -24,6 +24,7 @@ import ( ) var consistencyCheckInterval = settings.RegisterDurationSetting( + settings.TenantWritable, "server.consistency_check.interval", "the time between range consistency checks; set to 0 to disable consistency checking."+ " Note that intervals that are too short can negatively impact performance.", @@ -32,6 +33,7 @@ var consistencyCheckInterval = settings.RegisterDurationSetting( ) var consistencyCheckRate = settings.RegisterByteSizeSetting( + settings.TenantWritable, "server.consistency_check.max_rate", "the rate limit (bytes/sec) to use for consistency checks; used in "+ "conjunction with server.consistency_check.interval to control the "+ diff --git a/pkg/kv/kvserver/gc/gc.go b/pkg/kv/kvserver/gc/gc.go index b8e137541510..d1d7b05518e8 100644 --- a/pkg/kv/kvserver/gc/gc.go +++ b/pkg/kv/kvserver/gc/gc.go @@ -49,6 +49,7 @@ const ( // IntentAgeThreshold is the threshold after which an extant intent // will be resolved. var IntentAgeThreshold = settings.RegisterDurationSetting( + settings.TenantWritable, "kv.gc.intent_age_threshold", "intents older than this threshold will be resolved when encountered by the GC queue", 2*time.Hour, @@ -72,6 +73,7 @@ var IntentAgeThreshold = settings.RegisterDurationSetting( // of writing. This value is subject to tuning in real environment as we have // more data available. var MaxIntentsPerCleanupBatch = settings.RegisterIntSetting( + settings.TenantWritable, "kv.gc.intent_cleanup_batch_size", "if non zero, gc will split found intents into batches of this size when trying to resolve them", 5000, @@ -90,6 +92,7 @@ var MaxIntentsPerCleanupBatch = settings.RegisterIntSetting( // The default value is a conservative limit to prevent pending intent key sizes // from ballooning. var MaxIntentKeyBytesPerCleanupBatch = settings.RegisterIntSetting( + settings.TenantWritable, "kv.gc.intent_cleanup_batch_byte_size", "if non zero, gc will split found intents into batches of this size when trying to resolve them", 1e6, diff --git a/pkg/kv/kvserver/kvserverbase/base.go b/pkg/kv/kvserver/kvserverbase/base.go index 6fdfe1fa017d..11fa732c79f7 100644 --- a/pkg/kv/kvserver/kvserverbase/base.go +++ b/pkg/kv/kvserver/kvserverbase/base.go @@ -28,6 +28,7 @@ import ( // MergeQueueEnabled is a setting that controls whether the merge queue is // enabled. var MergeQueueEnabled = settings.RegisterBoolSetting( + settings.TenantWritable, "kv.range_merge.queue_enabled", "whether the automatic merge queue is enabled", true, @@ -209,6 +210,7 @@ func IntersectSpan( // SplitByLoadMergeDelay wraps "kv.range_split.by_load_merge_delay". 
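`kv.gc.intent_cleanup_batch_size` and `kv.gc.intent_cleanup_batch_byte_size` above split found intents into batches bounded both by count and by key bytes. A standalone sketch of that batching; the intent type is a toy:

```go
// Sketch of splitting intents into batches capped by count and key bytes,
// flushing a batch whenever either threshold would be crossed.
package main

import "fmt"

type intent struct{ key string }

func batchIntents(intents []intent, maxCount int, maxKeyBytes int64) [][]intent {
	var batches [][]intent
	var cur []intent
	var curBytes int64
	for _, in := range intents {
		if len(cur) > 0 && (len(cur) >= maxCount || curBytes+int64(len(in.key)) > maxKeyBytes) {
			batches = append(batches, cur)
			cur, curBytes = nil, 0
		}
		cur = append(cur, in)
		curBytes += int64(len(in.key))
	}
	if len(cur) > 0 {
		batches = append(batches, cur)
	}
	return batches
}

func main() {
	ins := []intent{{"a"}, {"bb"}, {"ccc"}, {"dddd"}}
	fmt.Println(len(batchIntents(ins, 2, 1<<20))) // 2 batches of 2, capped by count
}
```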
var SplitByLoadMergeDelay = settings.RegisterDurationSetting( + settings.TenantWritable, "kv.range_split.by_load_merge_delay", "the delay that range splits created due to load will wait before considering being merged away", 5*time.Minute, diff --git a/pkg/kv/kvserver/merge_queue.go b/pkg/kv/kvserver/merge_queue.go index 76d7d29eb041..7f491ef03d8f 100644 --- a/pkg/kv/kvserver/merge_queue.go +++ b/pkg/kv/kvserver/merge_queue.go @@ -44,6 +44,7 @@ const ( // MergeQueueInterval is a setting that controls how often the merge queue waits // between processing replicas. var MergeQueueInterval = settings.RegisterDurationSetting( + settings.TenantWritable, "kv.range_merge.queue_interval", "how long the merge queue waits between processing replicas", 5*time.Second, diff --git a/pkg/kv/kvserver/protectedts/ptreconcile/reconciler.go b/pkg/kv/kvserver/protectedts/ptreconcile/reconciler.go index e7720726157a..f975777a5089 100644 --- a/pkg/kv/kvserver/protectedts/ptreconcile/reconciler.go +++ b/pkg/kv/kvserver/protectedts/ptreconcile/reconciler.go @@ -34,6 +34,7 @@ import ( // ReconcileInterval is the interval between two generations of the reports. // When set to zero - disables the report generation. var ReconcileInterval = settings.RegisterDurationSetting( + settings.TenantWritable, "kv.protectedts.reconciliation.interval", "the frequency for reconciling jobs with protected timestamp records", 5*time.Minute, diff --git a/pkg/kv/kvserver/protectedts/settings.go b/pkg/kv/kvserver/protectedts/settings.go index d40603c0299d..4add96509a48 100644 --- a/pkg/kv/kvserver/protectedts/settings.go +++ b/pkg/kv/kvserver/protectedts/settings.go @@ -22,6 +22,7 @@ import ( // MaxBytes controls the maximum number of bytes worth of spans and metadata // which can be protected by all protected timestamp records. var MaxBytes = settings.RegisterIntSetting( + settings.TenantWritable, "kv.protectedts.max_bytes", "if non-zero the limit of the number of bytes of spans and metadata which can be protected", 1<<20, // 1 MiB @@ -31,6 +32,7 @@ var MaxBytes = settings.RegisterIntSetting( // MaxSpans controls the maximum number of spans which can be protected // by all protected timestamp records. var MaxSpans = settings.RegisterIntSetting( + settings.TenantWritable, "kv.protectedts.max_spans", "if non-zero the limit of the number of spans which can be protected", 32768, @@ -40,6 +42,7 @@ var MaxSpans = settings.RegisterIntSetting( // PollInterval defines how frequently the protectedts state is polled by the // Tracker. var PollInterval = settings.RegisterDurationSetting( + settings.TenantWritable, "kv.protectedts.poll_interval", // TODO(ajwerner): better description. "the interval at which the protectedts subsystem state is polled", diff --git a/pkg/kv/kvserver/queue.go b/pkg/kv/kvserver/queue.go index f3f8af37f731..6813039e12bf 100644 --- a/pkg/kv/kvserver/queue.go +++ b/pkg/kv/kvserver/queue.go @@ -51,6 +51,7 @@ const ( // which the processing of a queue may time out. It is an escape hatch to raise // the timeout for queues. 
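`kv.protectedts.max_bytes` and `kv.protectedts.max_spans` above cap what all protected-timestamp records together may hold, with zero meaning no cap. A sketch of admission against those totals; the record shape is illustrative:

```go
// Sketch of admitting a protected-timestamp record against cluster-wide
// span-count and byte caps (zero disables a cap).
package main

import (
	"errors"
	"fmt"
)

type ptsRecord struct {
	spans     int64
	spanBytes int64
}

func admitRecord(curSpans, curBytes int64, rec ptsRecord, maxSpans, maxBytes int64) error {
	if maxSpans > 0 && curSpans+rec.spans > maxSpans {
		return errors.New("protectedts: span limit exceeded")
	}
	if maxBytes > 0 && curBytes+rec.spanBytes > maxBytes {
		return errors.New("protectedts: byte limit exceeded")
	}
	return nil
}

func main() {
	err := admitRecord(32760, 1<<19, ptsRecord{spans: 100, spanBytes: 1 << 10}, 32768, 1<<20)
	fmt.Println(err) // span limit exceeded
}
```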
var queueGuaranteedProcessingTimeBudget = settings.RegisterDurationSetting( + settings.TenantWritable, "kv.queue.process.guaranteed_time_budget", "the guaranteed duration before which the processing of a queue may "+ "time out", diff --git a/pkg/kv/kvserver/raft_transport.go b/pkg/kv/kvserver/raft_transport.go index 681d39fe7faf..6d35ecfb74dd 100644 --- a/pkg/kv/kvserver/raft_transport.go +++ b/pkg/kv/kvserver/raft_transport.go @@ -58,6 +58,7 @@ const ( // targetRaftOutgoingBatchSize wraps "kv.raft.command.target_batch_size". var targetRaftOutgoingBatchSize = settings.RegisterByteSizeSetting( + settings.TenantWritable, "kv.raft.command.target_batch_size", "size of a batch of raft commands after which it will be sent without further batching", 64<<20, // 64 MB diff --git a/pkg/kv/kvserver/replica.go b/pkg/kv/kvserver/replica.go index 44f25fac97cf..e60c165827f3 100644 --- a/pkg/kv/kvserver/replica.go +++ b/pkg/kv/kvserver/replica.go @@ -81,6 +81,7 @@ const ( var testingDisableQuiescence = envutil.EnvOrDefaultBool("COCKROACH_DISABLE_QUIESCENCE", false) var disableSyncRaftLog = settings.RegisterBoolSetting( + settings.TenantWritable, "kv.raft_log.disable_synchronization_unsafe", "set to true to disable synchronization on Raft log writes to persistent storage. "+ "Setting to true risks data loss or data corruption on server crashes. "+ @@ -99,6 +100,7 @@ const ( // MaxCommandSize wraps "kv.raft.command.max_size". var MaxCommandSize = settings.RegisterByteSizeSetting( + settings.TenantWritable, "kv.raft.command.max_size", "maximum size of a raft command", MaxCommandSizeDefault, @@ -114,6 +116,7 @@ var MaxCommandSize = settings.RegisterByteSizeSetting( // threshold and the current GC TTL (true) or just based on the GC threshold // (false). var StrictGCEnforcement = settings.RegisterBoolSetting( + settings.TenantWritable, "kv.gc_ttl.strict_enforcement.enabled", "if true, fail to serve requests at timestamps below the TTL even if the data still exists", true, diff --git a/pkg/kv/kvserver/replica_backpressure.go b/pkg/kv/kvserver/replica_backpressure.go index e03dcdf22b4a..1c98cccdd008 100644 --- a/pkg/kv/kvserver/replica_backpressure.go +++ b/pkg/kv/kvserver/replica_backpressure.go @@ -27,6 +27,7 @@ var backpressureLogLimiter = log.Every(500 * time.Millisecond) // range's size must grow to before backpressure will be applied on writes. Set // to 0 to disable backpressure altogether. var backpressureRangeSizeMultiplier = settings.RegisterFloatSetting( + settings.TenantWritable, "kv.range.backpressure_range_size_multiplier", "multiple of range_max_bytes that a range is allowed to grow to without "+ "splitting before writes to that range are blocked, or 0 to disable", @@ -65,6 +66,7 @@ var backpressureRangeSizeMultiplier = settings.RegisterFloatSetting( // applying backpressure. // var backpressureByteTolerance = settings.RegisterByteSizeSetting( + settings.TenantWritable, "kv.range.backpressure_byte_tolerance", "defines the number of bytes above the product of "+ "backpressure_range_size_multiplier and the range_max_size at which "+ diff --git a/pkg/kv/kvserver/replica_follower_read.go b/pkg/kv/kvserver/replica_follower_read.go index 191de425fc88..21615ade3096 100644 --- a/pkg/kv/kvserver/replica_follower_read.go +++ b/pkg/kv/kvserver/replica_follower_read.go @@ -26,6 +26,7 @@ import ( // information is collected and passed around, regardless of the value of this // setting. 
var FollowerReadsEnabled = settings.RegisterBoolSetting( + settings.TenantWritable, "kv.closed_timestamp.follower_reads_enabled", "allow (all) replicas to serve consistent historical reads based on closed timestamp information", true, diff --git a/pkg/kv/kvserver/replica_rangefeed.go b/pkg/kv/kvserver/replica_rangefeed.go index cb052980a8b7..270c1f1015bd 100644 --- a/pkg/kv/kvserver/replica_rangefeed.go +++ b/pkg/kv/kvserver/replica_rangefeed.go @@ -41,6 +41,7 @@ import ( // RangefeedEnabled is a cluster setting that enables rangefeed requests. var RangefeedEnabled = settings.RegisterBoolSetting( + settings.TenantWritable, "kv.rangefeed.enabled", "if set, rangefeed registration is enabled", false, @@ -49,6 +50,7 @@ var RangefeedEnabled = settings.RegisterBoolSetting( // RangeFeedRefreshInterval controls the frequency with which we deliver closed // timestamp updates to rangefeeds. var RangeFeedRefreshInterval = settings.RegisterDurationSetting( + settings.TenantWritable, "kv.rangefeed.closed_timestamp_refresh_interval", "the interval at which closed-timestamp updates "+ "are delivered to rangefeeds; set to 0 to use kv.closed_timestamp.side_transport_interval", @@ -58,6 +60,7 @@ var RangeFeedRefreshInterval = settings.RegisterDurationSetting( // RangefeedTBIEnabled controls whether or not we use a TBI during catch-up scans. var RangefeedTBIEnabled = settings.RegisterBoolSetting( + settings.TenantWritable, "kv.rangefeed.catchup_scan_iterator_optimization.enabled", "if true, rangefeeds will use time-bound iterators for catch-up scans when possible", util.ConstantWithMetamorphicTestBool("kv.rangefeed.catchup_scan_iterator_optimization.enabled", true), diff --git a/pkg/kv/kvserver/replica_send.go b/pkg/kv/kvserver/replica_send.go index a7cb4d34075f..86956fb884ea 100644 --- a/pkg/kv/kvserver/replica_send.go +++ b/pkg/kv/kvserver/replica_send.go @@ -27,6 +27,7 @@ import ( ) var optimisticEvalLimitedScans = settings.RegisterBoolSetting( + settings.TenantWritable, "kv.concurrency.optimistic_eval_limited_scans.enabled", "when true, limited scans are optimistically evaluated in the sense of not checking for "+ "conflicting latches or locks up front for the full key range of the scan, and instead "+ diff --git a/pkg/kv/kvserver/replica_split_load.go b/pkg/kv/kvserver/replica_split_load.go index 757ea48c6681..15adcc0dad81 100644 --- a/pkg/kv/kvserver/replica_split_load.go +++ b/pkg/kv/kvserver/replica_split_load.go @@ -21,6 +21,7 @@ import ( // SplitByLoadEnabled wraps "kv.range_split.by_load_enabled". var SplitByLoadEnabled = settings.RegisterBoolSetting( + settings.TenantWritable, "kv.range_split.by_load_enabled", "allow automatic splits of ranges based on where load is concentrated", true, @@ -28,6 +29,7 @@ var SplitByLoadEnabled = settings.RegisterBoolSetting( // SplitByLoadQPSThreshold wraps "kv.range_split.load_qps_threshold". var SplitByLoadQPSThreshold = settings.RegisterIntSetting( + settings.TenantWritable, "kv.range_split.load_qps_threshold", "the QPS over which the range becomes a candidate for load-based splitting", 2500, // 2500 req/s
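For orientation, a minimal sketch (not part of this patch) of how callers typically consume settings like the ones above, assuming a *cluster.Settings named st:

// Illustrative only; `exampleConsume` is hypothetical.
func exampleConsume(ctx context.Context, st *cluster.Settings) {
	if RangefeedEnabled.Get(&st.SV) {
		refresh := RangeFeedRefreshInterval.Get(&st.SV)
		_ = refresh // drive the closed-timestamp delivery timer from this value
	}
	RangeFeedRefreshInterval.SetOnChange(&st.SV, func(ctx context.Context) {
		// Re-read the setting and adjust any timers. Per the SetOnChange
		// contract, this runs on the shared settings-update goroutine, so it
		// must not block.
	})
}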
diff --git a/pkg/kv/kvserver/replica_write.go b/pkg/kv/kvserver/replica_write.go index 58cb43de74c2..723a3c7eca68 100644 --- a/pkg/kv/kvserver/replica_write.go +++ b/pkg/kv/kvserver/replica_write.go @@ -43,6 +43,7 @@ import ( // TODO(erikgrinaker): this, and the timeout handling, should be moved into a // migration helper that manages checkpointing and retries as well. var migrateApplicationTimeout = settings.RegisterDurationSetting( + settings.TenantWritable, "kv.migration.migrate_application.timeout", "timeout for a Migrate request to be applied across all replicas of a range", 1*time.Minute, diff --git a/pkg/kv/kvserver/replicate_queue.go b/pkg/kv/kvserver/replicate_queue.go index 75e12ffcaa48..391fad611f96 100644 --- a/pkg/kv/kvserver/replicate_queue.go +++ b/pkg/kv/kvserver/replicate_queue.go @@ -51,6 +51,7 @@ const ( // for rebalancing. It does not prevent transferring leases in order to allow // a replica to be removed from a range. var MinLeaseTransferInterval = settings.RegisterDurationSetting( + settings.TenantWritable, "kv.allocator.min_lease_transfer_interval", "controls how frequently leases can be transferred for rebalancing. "+ "It does not prevent transferring leases in order to allow a "+ diff --git a/pkg/kv/kvserver/reports/reporter.go b/pkg/kv/kvserver/reports/reporter.go index 2c60a61a4ff6..98335afefa13 100644 --- a/pkg/kv/kvserver/reports/reporter.go +++ b/pkg/kv/kvserver/reports/reporter.go @@ -43,6 +43,7 @@ import ( // ReporterInterval is the interval between two generations of the reports. // When set to zero, report generation is disabled. var ReporterInterval = settings.RegisterDurationSetting( + settings.TenantWritable, "kv.replication_reports.interval", "the frequency for generating the replication_constraint_stats, replication_stats_report and "+ "replication_critical_localities reports (set to 0 to disable)", diff --git a/pkg/kv/kvserver/store.go b/pkg/kv/kvserver/store.go index 315d4ab6def3..a8a10cb2236e 100644 --- a/pkg/kv/kvserver/store.go +++ b/pkg/kv/kvserver/store.go @@ -120,6 +120,7 @@ var logSSTInfoTicks = envutil.EnvOrDefaultInt( // bulkIOWriteLimit is defined here because it is used by BulkIOWriteLimiter. var bulkIOWriteLimit = settings.RegisterByteSizeSetting( + settings.TenantWritable, "kv.bulk_io_write.max_rate", "the rate limit (bytes/sec) to use for writes to disk on behalf of bulk io ops", 1<<40, @@ -127,6 +128,7 @@ var bulkIOWriteLimit = settings.RegisterByteSizeSetting( // addSSTableRequestLimit limits concurrent AddSSTable requests. var addSSTableRequestLimit = settings.RegisterIntSetting( + settings.TenantWritable, "kv.bulk_io_write.concurrent_addsstable_requests", "number of concurrent AddSSTable requests per store before queueing", 1, @@ -139,6 +141,7 @@ var addSSTableRequestLimit = settings.RegisterIntSetting( // disk, so we can allow a greater amount of concurrency than regular AddSSTable // requests. Applied independently of concurrent_addsstable_requests. var addSSTableAsWritesRequestLimit = settings.RegisterIntSetting( + settings.TenantWritable, "kv.bulk_io_write.concurrent_addsstable_as_writes_requests", "number of concurrent AddSSTable requests ingested as writes per store before queueing", 10, @@ -147,6 +150,7 @@ var addSSTableAsWritesRequestLimit = settings.RegisterIntSetting( // concurrentRangefeedItersLimit limits concurrent rangefeed catchup iterators. var concurrentRangefeedItersLimit = settings.RegisterIntSetting( + settings.TenantWritable, "kv.rangefeed.concurrent_catchup_iterators", "number of rangefeed catchup iterators a store will allow concurrently before queueing", 64, @@ -157,6 +161,7 @@ var concurrentRangefeedItersLimit = settings.RegisterIntSetting( // ScanInterleavedIntents requests that will be run on a store. Used as part // of pre-evaluation throttling.
var concurrentscanInterleavedIntentsLimit = settings.RegisterIntSetting( + settings.TenantWritable, "kv.migration.concurrent_scan_interleaved_intents", "number of scan interleaved intents requests a store will handle concurrently before queueing", 1, @@ -166,6 +171,7 @@ var concurrentscanInterleavedIntentsLimit = settings.RegisterIntSetting( // Minimum time interval between system config updates which will lead to // enqueuing replicas. var queueAdditionOnSystemConfigUpdateRate = settings.RegisterFloatSetting( + settings.TenantWritable, "kv.store.system_config_update.queue_add_rate", "the rate (per second) at which the store will add all replicas to the split and merge queue due to system config gossip", .5, @@ -176,6 +182,7 @@ var queueAdditionOnSystemConfigUpdateRate = settings.RegisterFloatSetting( // enqueuing replicas. The default is relatively high to deal with startup // scenarios. var queueAdditionOnSystemConfigUpdateBurst = settings.RegisterIntSetting( + settings.TenantWritable, "kv.store.system_config_update.queue_add_burst", "the burst rate at which the store will add all replicas to the split and merge queue due to system config gossip", 32, @@ -186,6 +193,7 @@ var queueAdditionOnSystemConfigUpdateBurst = settings.RegisterIntSetting( // and Raft leadership transfers. var leaseTransferWait = func() *settings.DurationSetting { s := settings.RegisterDurationSetting( + settings.TenantWritable, leaseTransferWaitSettingName, "the amount of time a server waits to transfer range leases before proceeding with the rest of the shutdown process "+ "(note that the --drain-wait parameter for cockroach node drain may need adjustment "+ @@ -213,6 +221,7 @@ const leaseTransferWaitSettingName = "server.shutdown.lease_transfer_wait" // here since we check it in the caller to limit generated requests as well // to prevent excessive queuing. var ExportRequestsLimit = settings.RegisterIntSetting( + settings.TenantWritable, "kv.bulk_io_write.concurrent_export_requests", "number of export requests a store will handle concurrently before queuing", 3, diff --git a/pkg/kv/kvserver/store_pool.go b/pkg/kv/kvserver/store_pool.go index 94c23fd6d34c..b0e0d98052c4 100644 --- a/pkg/kv/kvserver/store_pool.go +++ b/pkg/kv/kvserver/store_pool.go @@ -45,6 +45,7 @@ const ( // replicate queue will not consider stores which have failed a reservation a // viable target. var FailedReservationsTimeout = settings.RegisterDurationSetting( + settings.TenantWritable, "server.failed_reservation_timeout", "the amount of time to consider the store throttled for up-replication after a failed reservation call", 5*time.Second, @@ -56,6 +57,7 @@ const timeAfterStoreSuspectSettingName = "server.time_after_store_suspect" // TimeAfterStoreSuspect measures how long we consider a store suspect since // its last failure. var TimeAfterStoreSuspect = settings.RegisterDurationSetting( + settings.TenantWritable, timeAfterStoreSuspectSettingName, "the amount of time we consider a store suspect for after it fails a node liveness heartbeat."+ " A suspect node would not receive any new replicas or lease transfers, but will keep the replicas it has.",
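The queue_add_rate and queue_add_burst settings above pair up like a token bucket. A hedged sketch of how such a pair could feed a limiter, using golang.org/x/time/rate purely for illustration (the store's actual limiter implementation is not shown in this diff, and systemConfigUpdateLimiter is a hypothetical name):

import "golang.org/x/time/rate"

// Illustrative only: rate is tokens added per second, burst is the bucket
// size; a replica is enqueued only when limiter.Allow() returns true.
func systemConfigUpdateLimiter(st *cluster.Settings) *rate.Limiter {
	return rate.NewLimiter(
		rate.Limit(queueAdditionOnSystemConfigUpdateRate.Get(&st.SV)),
		int(queueAdditionOnSystemConfigUpdateBurst.Get(&st.SV)),
	)
}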
@@ -79,6 +81,7 @@ const timeUntilStoreDeadSettingName = "server.time_until_store_dead" // TimeUntilStoreDead wraps "server.time_until_store_dead". var TimeUntilStoreDead = func() *settings.DurationSetting { s := settings.RegisterDurationSetting( + settings.TenantWritable, timeUntilStoreDeadSettingName, "the time after which, if there is no new gossiped information about a store, it is considered dead", 5*time.Minute, diff --git a/pkg/kv/kvserver/store_rebalancer.go b/pkg/kv/kvserver/store_rebalancer.go index d28f86450a8a..c7367f088dcc 100644 --- a/pkg/kv/kvserver/store_rebalancer.go +++ b/pkg/kv/kvserver/store_rebalancer.go @@ -72,6 +72,7 @@ func makeStoreRebalancerMetrics() StoreRebalancerMetrics { // additional variables such as write load and disk usage into account. // If disabled, rebalancing is done purely based on replica count. var LoadBasedRebalancingMode = settings.RegisterEnumSetting( + settings.TenantWritable, "kv.allocator.load_based_rebalancing", "whether to rebalance based on the distribution of QPS across stores", "leases and replicas", @@ -89,6 +90,7 @@ var LoadBasedRebalancingMode = settings.RegisterEnumSetting( // forgiving to avoid thrashing. var qpsRebalanceThreshold = func() *settings.FloatSetting { s := settings.RegisterFloatSetting( + settings.TenantWritable, "kv.allocator.qps_rebalance_threshold", "minimum fraction away from the mean a store's QPS (queries per second) can be before it is considered overfull or underfull", 0.25, diff --git a/pkg/kv/kvserver/store_snapshot.go b/pkg/kv/kvserver/store_snapshot.go index 0276938a6e02..22309ba010a3 100644 --- a/pkg/kv/kvserver/store_snapshot.go +++ b/pkg/kv/kvserver/store_snapshot.go @@ -696,11 +696,12 @@ func validatePositive(v int64) error { // context of up-replication or rebalancing (i.e. any snapshot that was not // requested by raft itself, to which `kv.snapshot_recovery.max_rate` applies). var rebalanceSnapshotRate = settings.RegisterByteSizeSetting( + settings.SystemOnly, "kv.snapshot_rebalance.max_rate", "the rate limit (bytes/sec) to use for rebalance and upreplication snapshots", 32<<20, // 32mb/s validatePositive, -).WithPublic().WithSystemOnly() +).WithPublic() // recoverySnapshotRate is the rate at which Raft-initiated snapshots can be // sent. Ideally, one would never see a Raft-initiated snapshot; we'd like all @@ -712,17 +713,19 @@ var rebalanceSnapshotRate = settings.RegisterByteSizeSetting( // to a semaphore at the receiver, and so the slower one ultimately determines // the pace at which things can move along. var recoverySnapshotRate = settings.RegisterByteSizeSetting( + settings.SystemOnly, "kv.snapshot_recovery.max_rate", "the rate limit (bytes/sec) to use for recovery snapshots", 32<<20, // 32mb/s validatePositive, -).WithPublic().WithSystemOnly() +).WithPublic() // snapshotSenderBatchSize is the size that key-value batches are allowed to // grow to during Range snapshots before being sent to the receiver. This limit // places an upper bound on the memory footprint of the sender of a Range // snapshot. It is also the granularity of rate limiting. var snapshotSenderBatchSize = settings.RegisterByteSizeSetting( + settings.TenantWritable, "kv.snapshot_sender.batch_size", "size of key-value batches sent over the network during snapshots", 256<<10, // 256 KB @@ -733,6 +736,7 @@ var snapshotSenderBatchSize = settings.RegisterByteSizeSetting( // The default of 2 MiB was chosen to be in line with the behavior in bulk-io. // See sstWriteSyncRate.
var snapshotSSTWriteSyncRate = settings.RegisterByteSizeSetting( + settings.TenantWritable, "kv.snapshot_sst.sync_size", "threshold after which snapshot SST writes must fsync", bulkIOWriteBurst, diff --git a/pkg/kv/kvserver/syncing_write.go b/pkg/kv/kvserver/syncing_write.go index fc13fd81a432..7714dd489e39 100644 --- a/pkg/kv/kvserver/syncing_write.go +++ b/pkg/kv/kvserver/syncing_write.go @@ -63,6 +63,7 @@ func limitBulkIOWrite(ctx context.Context, limiter *rate.Limiter, cost int) erro // sstWriteSyncRate wraps "kv.bulk_sst.sync_size". 0 disables syncing. var sstWriteSyncRate = settings.RegisterByteSizeSetting( + settings.TenantWritable, "kv.bulk_sst.sync_size", "threshold after which non-Rocks SST writes must fsync (0 disables)", bulkIOWriteBurst, diff --git a/pkg/kv/kvserver/tenantrate/settings.go b/pkg/kv/kvserver/tenantrate/settings.go index 147cf8ae9370..e4945f27f729 100644 --- a/pkg/kv/kvserver/tenantrate/settings.go +++ b/pkg/kv/kvserver/tenantrate/settings.go @@ -64,6 +64,7 @@ var ( // per CPU, or roughly 20% of the machine (by design 1 RU roughly maps to 1 // CPU-millisecond). kvcuRateLimit = settings.RegisterFloatSetting( + settings.TenantWritable, "kv.tenant_rate_limiter.rate_limit", "per-tenant rate limit in KV Compute Units per second if positive, "+ "or KV Compute Units per second per CPU if negative", @@ -77,6 +78,7 @@ var ( ) kvcuBurstLimitSeconds = settings.RegisterFloatSetting( + settings.TenantWritable, "kv.tenant_rate_limiter.burst_limit_seconds", "per-tenant burst limit as a multiplier of the rate", 10, @@ -84,6 +86,7 @@ var ( ) readRequestCost = settings.RegisterFloatSetting( + settings.TenantWritable, "kv.tenant_rate_limiter.read_request_cost", "base cost of a read request in KV Compute Units", 0.7, @@ -91,6 +94,7 @@ var ( ) readCostPerMB = settings.RegisterFloatSetting( + settings.TenantWritable, "kv.tenant_rate_limiter.read_cost_per_megabyte", "cost of a read in KV Compute Units per MB", 10.0, @@ -98,6 +102,7 @@ var ( ) writeRequestCost = settings.RegisterFloatSetting( + settings.TenantWritable, "kv.tenant_rate_limiter.write_request_cost", "base cost of a write request in KV Compute Units", 1.0, @@ -105,6 +110,7 @@ var ( ) writeCostPerMB = settings.RegisterFloatSetting( + settings.TenantWritable, "kv.tenant_rate_limiter.write_cost_per_megabyte", "cost of a write in KV Compute Units per MB", 400.0, diff --git a/pkg/multitenant/tenantcostmodel/settings.go b/pkg/multitenant/tenantcostmodel/settings.go index 4047d1675edf..95d34b83aabf 100644 --- a/pkg/multitenant/tenantcostmodel/settings.go +++ b/pkg/multitenant/tenantcostmodel/settings.go @@ -27,6 +27,7 @@ import ( // from the host cluster. 
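As intuition for the coefficients registered just below: the tenant cost model composes linearly, charging a base amount of Request Units per request plus a per-MB rate on the payload. A hedged sketch of the implied arithmetic (readRU is a hypothetical helper, not part of this change; MB is treated as 1<<20 bytes here):

// Hypothetical helper showing the implied RU arithmetic for reads.
func readRU(payloadBytes float64) float64 {
	// base request cost + per-MB cost * payload size in MB
	return 0.6993 + 107.6563*(payloadBytes/(1<<20))
}

// e.g. a 1 MB read works out to roughly 0.6993 + 107.6563 ≈ 108.4 RU.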
var ( readRequestCost = settings.RegisterFloatSetting( + settings.TenantWritable, "tenant_cost_model.kv_read_request_cost", "base cost of a read request in Request Units", 0.6993, @@ -34,6 +35,7 @@ var ( ) readCostPerMB = settings.RegisterFloatSetting( + settings.TenantWritable, "tenant_cost_model.kv_read_cost_per_megabyte", "cost of a read in Request Units per MB", 107.6563, @@ -41,6 +43,7 @@ var ( ) writeRequestCost = settings.RegisterFloatSetting( + settings.TenantWritable, "tenant_cost_model.kv_write_request_cost", "base cost of a write request in Request Units", 5.7733, @@ -48,6 +51,7 @@ var ( ) writeCostPerMB = settings.RegisterFloatSetting( + settings.TenantWritable, "tenant_cost_model.kv_write_cost_per_megabyte", "cost of a write in Request Units per MB", 2026.3021, @@ -55,6 +59,7 @@ var ( ) podCPUSecondCost = settings.RegisterFloatSetting( + settings.TenantWritable, "tenant_cost_model.pod_cpu_second_cost", "cost of a CPU-second on the tenant POD in Request Units", 1000.0, @@ -62,6 +67,7 @@ var ( ) pgwireEgressCostPerMB = settings.RegisterFloatSetting( + settings.TenantWritable, "tenant_cost_model.pgwire_egress_cost_per_megabyte", "cost of client <-> SQL ingress/egress per MB", 878.9063, diff --git a/pkg/security/password.go b/pkg/security/password.go index 5f6d7bad81da..2b9575a8f6a9 100644 --- a/pkg/security/password.go +++ b/pkg/security/password.go @@ -85,6 +85,7 @@ func HashPassword(ctx context.Context, password string) ([]byte, error) { // AutoDetectPasswordHashes is the cluster setting that configures whether // the server recognizes pre-hashed passwords. var AutoDetectPasswordHashes = settings.RegisterBoolSetting( + settings.TenantWritable, "server.user_login.store_client_pre_hashed_passwords.enabled", "whether the server accepts to store passwords pre-hashed by clients", true, @@ -159,6 +160,7 @@ func CheckPasswordHashValidity( // MinPasswordLength is the cluster setting that configures the // minimum SQL password length. var MinPasswordLength = settings.RegisterIntSetting( + settings.TenantWritable, "server.user_login.min_password_length", "the minimum length accepted for passwords set in cleartext via SQL. "+ "Note that a value lower than 1 is ignored: passwords cannot be empty in any case.", diff --git a/pkg/security/tls_settings.go b/pkg/security/tls_settings.go index 2016922a2288..f5a82191c4a8 100644 --- a/pkg/security/tls_settings.go +++ b/pkg/security/tls_settings.go @@ -33,7 +33,8 @@ type TLSSettings interface { ocspTimeout() time.Duration } -var ocspMode = settings.RegisterEnumSetting("security.ocsp.mode", +var ocspMode = settings.RegisterEnumSetting( + settings.TenantWritable, "security.ocsp.mode", "use OCSP to check whether TLS certificates are revoked. If the OCSP "+ "server is unreachable, in strict mode all certificates will be rejected "+ "and in lax mode all certificates will be accepted.", @@ -42,7 +43,8 @@ var ocspMode = settings.RegisterEnumSetting("security.ocsp.mode", // TODO(bdarnell): 3 seconds is the same as base.NetworkTimeout, but // we can't use it here due to import cycles. We need a real // no-dependencies base package for constants like this. 
-var ocspTimeout = settings.RegisterDurationSetting("security.ocsp.timeout", +var ocspTimeout = settings.RegisterDurationSetting( + settings.TenantWritable, "security.ocsp.timeout", "timeout before considering the OCSP server unreachable", 3*time.Second, settings.NonNegativeDuration, diff --git a/pkg/server/authentication.go b/pkg/server/authentication.go index ef0743dd650c..fca93d6a9034 100644 --- a/pkg/server/authentication.go +++ b/pkg/server/authentication.go @@ -87,6 +87,7 @@ var ConfigureOIDC = func( } var webSessionTimeout = settings.RegisterDurationSetting( + settings.TenantWritable, "server.web_session_timeout", "the duration that a newly created web session will be valid", 7*24*time.Hour, diff --git a/pkg/server/debug/server.go b/pkg/server/debug/server.go index 24e16fa8d0e5..788b99b47b3c 100644 --- a/pkg/server/debug/server.go +++ b/pkg/server/debug/server.go @@ -53,7 +53,8 @@ const Endpoint = "/debug/" var _ = func() *settings.StringSetting { // This setting definition still exists so as to not break // deployment scripts that set it unconditionally. - v := settings.RegisterStringSetting("server.remote_debugging.mode", "unused", "local") + v := settings.RegisterStringSetting( + settings.TenantWritable, "server.remote_debugging.mode", "unused", "local") v.SetRetired() return v }() diff --git a/pkg/server/diagnostics/reporter.go b/pkg/server/diagnostics/reporter.go index e2c06c54e3e7..0e7b3965c58d 100644 --- a/pkg/server/diagnostics/reporter.go +++ b/pkg/server/diagnostics/reporter.go @@ -57,6 +57,7 @@ type NodeStatusGenerator interface { } var reportFrequency = settings.RegisterDurationSetting( + settings.TenantWritable, "diagnostics.reporting.interval", "interval at which diagnostics data should be reported", time.Hour, diff --git a/pkg/server/drain.go b/pkg/server/drain.go index 1bd8a3c2c59b..c59c7127bda3 100644 --- a/pkg/server/drain.go +++ b/pkg/server/drain.go @@ -29,6 +29,7 @@ import ( var ( queryWait = settings.RegisterDurationSetting( + settings.TenantWritable, "server.shutdown.query_wait", "the server will wait for at least this amount of time for active queries to finish "+ "(note that the --drain-wait parameter for cockroach node drain may need adjustment "+ @@ -37,6 +38,7 @@ var ( ).WithPublic() drainWait = settings.RegisterDurationSetting( + settings.TenantWritable, "server.shutdown.drain_wait", "the amount of time a server waits in an unready state before proceeding with the rest "+ "of the shutdown process "+ diff --git a/pkg/server/goroutinedumper/goroutinedumper.go b/pkg/server/goroutinedumper/goroutinedumper.go index ebd8d39e47f0..4a8bd446f040 100644 --- a/pkg/server/goroutinedumper/goroutinedumper.go +++ b/pkg/server/goroutinedumper/goroutinedumper.go @@ -34,12 +34,14 @@ const ( var ( numGoroutinesThreshold = settings.RegisterIntSetting( + settings.TenantWritable, "server.goroutine_dump.num_goroutines_threshold", "a threshold beyond which, if the number of goroutines increases, "+ "a goroutine dump can be triggered", 1000, ) totalDumpSizeLimit = settings.RegisterByteSizeSetting( + settings.TenantWritable, "server.goroutine_dump.total_dump_size_limit", "total size of goroutine dumps to be kept. "+ "Dumps are GC'ed in the order of creation time.
The latest dump is "+ diff --git a/pkg/server/heapprofiler/cluster_settings.go b/pkg/server/heapprofiler/cluster_settings.go index edc37963396b..3d4a70056593 100644 --- a/pkg/server/heapprofiler/cluster_settings.go +++ b/pkg/server/heapprofiler/cluster_settings.go @@ -21,7 +21,8 @@ import "github.com/cockroachdb/cockroach/pkg/settings" // Note: this feature only works for nodes running on unix hosts with cgroups // enabled. var ActiveQueryDumpsEnabled = settings.RegisterBoolSetting( + settings.SystemOnly, "diagnostics.active_query_dumps.enabled", "experimental: enable dumping of anonymized active queries to disk when node is under memory pressure", true, -).WithPublic().WithSystemOnly() +).WithPublic() diff --git a/pkg/server/heapprofiler/profilestore.go b/pkg/server/heapprofiler/profilestore.go index 8004a236e5bf..921947c81560 100644 --- a/pkg/server/heapprofiler/profilestore.go +++ b/pkg/server/heapprofiler/profilestore.go @@ -28,6 +28,7 @@ import ( var ( maxProfiles = settings.RegisterIntSetting( + settings.TenantWritable, "server.mem_profile.max_profiles", "maximum number of profiles to be kept per ramp-up of memory usage. "+ "A ramp-up is defined as a sequence of profiles with increasing usage.", @@ -35,6 +36,7 @@ var ( ) maxCombinedFileSize = settings.RegisterByteSizeSetting( + settings.TenantWritable, "server.mem_profile.total_dump_size_limit", "maximum combined disk size of preserved memory profiles", 128<<20, // 128MiB @@ -43,10 +45,12 @@ var ( func init() { s := settings.RegisterIntSetting( + settings.TenantWritable, "server.heap_profile.max_profiles", "use server.mem_profile.max_profiles instead", 5) s.SetRetired() b := settings.RegisterByteSizeSetting( + settings.TenantWritable, "server.heap_profile.total_dump_size_limit", "use server.mem_profile.total_dump_size_limit instead", 128<<20, // 128MiB diff --git a/pkg/server/node.go b/pkg/server/node.go index 5634c26b2876..592ccee94208 100644 --- a/pkg/server/node.go +++ b/pkg/server/node.go @@ -134,12 +134,14 @@ This metric is thus not an indicator of KV health.`, var ( // graphiteEndpoint is host:port, if any, of Graphite metrics server. graphiteEndpoint = settings.RegisterStringSetting( + settings.TenantWritable, "external.graphite.endpoint", "if nonempty, push server metrics to the Graphite or Carbon server at the specified host:port", "", ).WithPublic() // graphiteInterval is how often metrics are pushed to Graphite, if enabled. 
graphiteInterval = settings.RegisterDurationSetting( + settings.TenantWritable, graphiteIntervalKey, "the interval at which metrics are pushed to Graphite (if enabled)", 10*time.Second, diff --git a/pkg/server/purge_auth_session.go b/pkg/server/purge_auth_session.go index 46ff2dd884ba..30966c18c6e7 100644 --- a/pkg/server/purge_auth_session.go +++ b/pkg/server/purge_auth_session.go @@ -25,12 +25,14 @@ import ( var ( webSessionPurgeTTL = settings.RegisterDurationSetting( + settings.TenantWritable, "server.web_session.purge.ttl", "if nonzero, entries in system.web_sessions older than this duration are periodically purged", time.Hour, ).WithPublic() webSessionAutoLogoutTimeout = settings.RegisterDurationSetting( + settings.TenantWritable, "server.web_session.auto_logout.timeout", "the duration that web sessions will survive before being periodically purged, since they were last used", 7*24*time.Hour, @@ -38,6 +40,7 @@ var ( ).WithPublic() webSessionPurgePeriod = settings.RegisterDurationSetting( + settings.TenantWritable, "server.web_session.purge.period", "the time until old sessions are deleted", time.Hour, @@ -45,6 +48,7 @@ var ( ).WithPublic() webSessionPurgeLimit = settings.RegisterIntSetting( + settings.TenantWritable, "server.web_session.purge.max_deletions_per_cycle", "the maximum number of old sessions to delete for each purge", 10, diff --git a/pkg/server/server.go b/pkg/server/server.go index b07d6ab5aa91..2cc1e91e6f24 100644 --- a/pkg/server/server.go +++ b/pkg/server/server.go @@ -118,12 +118,14 @@ var ( gzipResponseWriterPool sync.Pool forwardClockJumpCheckEnabled = settings.RegisterBoolSetting( + settings.TenantWritable, "server.clock.forward_jump_check_enabled", "if enabled, forward clock jumps > max_offset/2 will cause a panic", false, ).WithPublic() persistHLCUpperBoundInterval = settings.RegisterDurationSetting( + settings.TenantWritable, "server.clock.persist_upper_bound_interval", "the interval between persisting the wall time upper bound of the clock. The clock "+ "does not generate a wall time greater than the persisted timestamp and will panic if "+ diff --git a/pkg/server/server_systemlog_gc.go b/pkg/server/server_systemlog_gc.go index 57d3e99f5bd3..249f0a68782f 100644 --- a/pkg/server/server_systemlog_gc.go +++ b/pkg/server/server_systemlog_gc.go @@ -36,6 +36,7 @@ var ( // rangeLogTTL is the TTL for rows in system.rangelog. If non zero, range log // entries are periodically garbage collected. rangeLogTTL = settings.RegisterDurationSetting( + settings.TenantWritable, "server.rangelog.ttl", fmt.Sprintf( "if nonzero, range log entries older than this duration are deleted every %s. "+ @@ -48,6 +49,7 @@ var ( // eventLogTTL is the TTL for rows in system.eventlog. If non zero, event log // entries are periodically garbage collected. eventLogTTL = settings.RegisterDurationSetting( + settings.TenantWritable, "server.eventlog.ttl", fmt.Sprintf( "if nonzero, entries in system.eventlog older than this duration are deleted every %s. 
"+ diff --git a/pkg/server/settingsworker_test.go b/pkg/server/settingsworker_test.go index 339c3e3fdbac..0e4e1b1e6e9a 100644 --- a/pkg/server/settingsworker_test.go +++ b/pkg/server/settingsworker_test.go @@ -33,32 +33,37 @@ const durationKey = "testing.duration" const byteSizeKey = "testing.bytesize" const enumKey = "testing.enum" -var strA = settings.RegisterValidatedStringSetting(strKey, "desc", "", func(sv *settings.Values, v string) error { - if len(v) > 15 { - return errors.Errorf("can't set %s to string longer than 15: %s", strKey, v) - } - return nil -}) -var intA = settings.RegisterIntSetting(intKey, "desc", 1, func(v int64) error { - if v < 0 { - return errors.Errorf("can't set %s to a negative value: %d", intKey, v) - } - return nil +var strA = settings.RegisterValidatedStringSetting( + settings.TenantWritable, strKey, "desc", "", func(sv *settings.Values, v string) error { + if len(v) > 15 { + return errors.Errorf("can't set %s to string longer than 15: %s", strKey, v) + } + return nil + }) +var intA = settings.RegisterIntSetting( + settings.TenantWritable, intKey, "desc", 1, func(v int64) error { + if v < 0 { + return errors.Errorf("can't set %s to a negative value: %d", intKey, v) + } + return nil -}) -var durationA = settings.RegisterDurationSetting(durationKey, "desc", time.Minute, func(v time.Duration) error { - if v < 0 { - return errors.Errorf("can't set %s to a negative duration: %s", durationKey, v) - } - return nil -}) -var byteSizeA = settings.RegisterByteSizeSetting(byteSizeKey, "desc", 1024*1024, func(v int64) error { - if v < 0 { - return errors.Errorf("can't set %s to a negative value: %d", byteSizeKey, v) - } - return nil -}) -var enumA = settings.RegisterEnumSetting(enumKey, "desc", "foo", map[int64]string{1: "foo", 2: "bar"}) + }) +var durationA = settings.RegisterDurationSetting( + settings.TenantWritable, durationKey, "desc", time.Minute, func(v time.Duration) error { + if v < 0 { + return errors.Errorf("can't set %s to a negative duration: %s", durationKey, v) + } + return nil + }) +var byteSizeA = settings.RegisterByteSizeSetting( + settings.TenantWritable, byteSizeKey, "desc", 1024*1024, func(v int64) error { + if v < 0 { + return errors.Errorf("can't set %s to a negative value: %d", byteSizeKey, v) + } + return nil + }) +var enumA = settings.RegisterEnumSetting( + settings.TenantWritable, enumKey, "desc", "foo", map[int64]string{1: "foo", 2: "bar"}) func TestSettingsRefresh(t *testing.T) { defer leaktest.AfterTest(t)() diff --git a/pkg/server/status/recorder.go b/pkg/server/status/recorder.go index d61929c8496e..e1a5bc8d6c6d 100644 --- a/pkg/server/status/recorder.go +++ b/pkg/server/status/recorder.go @@ -87,7 +87,8 @@ type storeMetrics interface { Registry() *metric.Registry } -var childMetricsEnabled = settings.RegisterBoolSetting("server.child_metrics.enabled", +var childMetricsEnabled = settings.RegisterBoolSetting( + settings.TenantWritable, "server.child_metrics.enabled", "enables the exporting of child metrics, additional prometheus time series with extra labels", false) diff --git a/pkg/server/tracedumper/tracedumper.go b/pkg/server/tracedumper/tracedumper.go index 0ac0debc1613..0f42fa572e5b 100644 --- a/pkg/server/tracedumper/tracedumper.go +++ b/pkg/server/tracedumper/tracedumper.go @@ -34,6 +34,7 @@ const ( var ( totalDumpSizeLimit = settings.RegisterByteSizeSetting( + settings.TenantWritable, "server.job_trace.total_dump_size_limit", "total size of job trace dumps to be kept. "+ "Dumps are GC'ed in the order of creation time. 
The latest dump is "+ diff --git a/pkg/settings/bool.go b/pkg/settings/bool.go index 7c2449007008..312c27cb6955 100644 --- a/pkg/settings/bool.go +++ b/pkg/settings/bool.go @@ -92,15 +92,9 @@ func (b *BoolSetting) WithPublic() *BoolSetting { return b } -// WithSystemOnly marks this setting as system-only and can be chained. -func (b *BoolSetting) WithSystemOnly() *BoolSetting { - b.common.systemOnly = true - return b -} - // RegisterBoolSetting defines a new setting with type bool. -func RegisterBoolSetting(key, desc string, defaultValue bool) *BoolSetting { +func RegisterBoolSetting(class Class, key, desc string, defaultValue bool) *BoolSetting { setting := &BoolSetting{defaultValue: defaultValue} - register(key, desc, setting) + register(class, key, desc, setting) return setting } diff --git a/pkg/settings/byte_size.go b/pkg/settings/byte_size.go index 75b641069784..9b578e05904d 100644 --- a/pkg/settings/byte_size.go +++ b/pkg/settings/byte_size.go @@ -39,16 +39,10 @@ func (b *ByteSizeSetting) WithPublic() *ByteSizeSetting { return b } -// WithSystemOnly marks this setting as system-only and can be chained. -func (b *ByteSizeSetting) WithSystemOnly() *ByteSizeSetting { - b.common.systemOnly = true - return b -} - // RegisterByteSizeSetting defines a new setting with type bytesize and any // supplied validation function(s). func RegisterByteSizeSetting( - key, desc string, defaultValue int64, validateFns ...func(int64) error, + class Class, key, desc string, defaultValue int64, validateFns ...func(int64) error, ) *ByteSizeSetting { var validateFn func(int64) error @@ -72,6 +66,6 @@ func RegisterByteSizeSetting( defaultValue: defaultValue, validateFn: validateFn, }} - register(key, desc, setting) + register(class, key, desc, setting) return setting } diff --git a/pkg/settings/common.go b/pkg/settings/common.go index eda0a0e71fc7..64c853d2d57e 100644 --- a/pkg/settings/common.go +++ b/pkg/settings/common.go @@ -18,19 +18,17 @@ import ( // common implements basic functionality used by all setting types. type common struct { description string + class Class visibility Visibility - systemOnly bool // Each setting has a slotIdx which is used as a handle with Values. slotIdx int nonReportable bool retired bool } -func (i *common) isRetired() bool { - return i.retired -} - -func (i *common) setSlotIdx(slotIdx int) { +// init must be called to initialize the fields that don't have defaults. 
+func (i *common) init(class Class, slotIdx int, description string) { + i.class = class if slotIdx < 1 { panic(fmt.Sprintf("Invalid slot index %d", slotIdx)) } @@ -38,25 +36,27 @@ func (i *common) setSlotIdx(slotIdx int) { panic("too many settings; increase MaxSettings") } i.slotIdx = slotIdx + i.description = description } -func (i *common) getSlotIdx() int { - return i.slotIdx + +func (i *common) isRetired() bool { + return i.retired } -func (i *common) setDescription(s string) { - i.description = s +func (i *common) getSlotIdx() int { + return i.slotIdx } func (i common) Description() string { return i.description } -func (i common) Visibility() Visibility { - return i.visibility +func (i common) Class() Class { + return i.class } -func (i common) SystemOnly() bool { - return i.systemOnly +func (i common) Visibility() Visibility { + return i.visibility } func (i common) isReportable() bool { @@ -103,10 +103,9 @@ func (i *common) SetOnChange(sv *Values, fn func(ctx context.Context)) { type internalSetting interface { NonMaskedSetting + init(class Class, slotIdx int, desc string) isRetired() bool setToDefault(ctx context.Context, sv *Values) - setDescription(desc string) - setSlotIdx(slotIdx int) getSlotIdx() int // isReportable indicates whether the value of the setting can be // included in user-facing reports such as that produced by SHOW ALL diff --git a/pkg/settings/doc.go b/pkg/settings/doc.go index fad77f88b589..d33e4bcaa4f8 100644 --- a/pkg/settings/doc.go +++ b/pkg/settings/doc.go @@ -27,7 +27,10 @@ setting is to be used. For example, to add an "enterprise" flag, adding into license_check.go: var enterpriseEnabled = settings.RegisterBoolSetting( - "enterprise.enabled", "some doc for the setting", false, + settings.TenantWritable, + "enterprise.enabled", + "some doc for the setting", + false, ) Then use with `if enterpriseEnabled.Get() ...` diff --git a/pkg/settings/duration.go b/pkg/settings/duration.go index e0f73b9655f1..08bca8d6c9cb 100644 --- a/pkg/settings/duration.go +++ b/pkg/settings/duration.go @@ -120,18 +120,12 @@ func (d *DurationSetting) WithPublic() *DurationSetting { return d } -// WithSystemOnly marks this setting as system-only and can be chained. -func (d *DurationSetting) WithSystemOnly() *DurationSetting { - d.common.systemOnly = true - return d -} - -// Defeat the linter. -var _ = (*DurationSetting).WithSystemOnly - // RegisterDurationSetting defines a new setting with type duration. func RegisterDurationSetting( - key, desc string, defaultValue time.Duration, validateFns ...func(time.Duration) error, + class Class, + key, desc string, + defaultValue time.Duration, + validateFns ...func(time.Duration) error, ) *DurationSetting { var validateFn func(time.Duration) error if len(validateFns) > 0 { @@ -154,7 +148,7 @@ func RegisterDurationSetting( defaultValue: defaultValue, validateFn: validateFn, } - register(key, desc, setting) + register(class, key, desc, setting) return setting } @@ -162,7 +156,7 @@ func RegisterDurationSetting( // public setting with type duration which requires an explicit unit when being // set. 
func RegisterPublicDurationSettingWithExplicitUnit( - key, desc string, defaultValue time.Duration, validateFn func(time.Duration) error, + class Class, key, desc string, defaultValue time.Duration, validateFn func(time.Duration) error, ) *DurationSettingWithExplicitUnit { var fn func(time.Duration) error @@ -179,7 +173,7 @@ func RegisterPublicDurationSettingWithExplicitUnit( }, } setting.SetVisibility(Public) - register(key, desc, setting) + register(class, key, desc, setting) return setting } diff --git a/pkg/settings/enum.go b/pkg/settings/enum.go index e061a0b975f4..a9d5a7d34ec2 100644 --- a/pkg/settings/enum.go +++ b/pkg/settings/enum.go @@ -110,18 +110,9 @@ func (e *EnumSetting) WithPublic() *EnumSetting { return e } -// WithSystemOnly indicates system-usage only and can be chained. -func (e *EnumSetting) WithSystemOnly() *EnumSetting { - e.common.systemOnly = true - return e -} - -// Defeat the linter. -var _ = (*EnumSetting).WithSystemOnly - // RegisterEnumSetting defines a new setting with type enum. func RegisterEnumSetting( - key, desc string, defaultValue string, enumValues map[int64]string, + class Class, key, desc string, defaultValue string, enumValues map[int64]string, ) *EnumSetting { enumValuesLower := make(map[int64]string) var i int64 @@ -143,6 +134,6 @@ func RegisterEnumSetting( enumValues: enumValuesLower, } - register(key, fmt.Sprintf("%s %s", desc, enumValuesToDesc(enumValues)), setting) + register(class, key, fmt.Sprintf("%s %s", desc, enumValuesToDesc(enumValues)), setting) return setting } diff --git a/pkg/settings/float.go b/pkg/settings/float.go index fbaa89d84fe4..0a1bc5b841bd 100644 --- a/pkg/settings/float.go +++ b/pkg/settings/float.go @@ -109,18 +109,9 @@ func (f *FloatSetting) WithPublic() *FloatSetting { return f } -// WithSystemOnly indicates system-usage only and can be chained. -func (f *FloatSetting) WithSystemOnly() *FloatSetting { - f.common.systemOnly = true - return f -} - -// Defeat the linter. -var _ = (*FloatSetting).WithSystemOnly - // RegisterFloatSetting defines a new setting with type float. func RegisterFloatSetting( - key, desc string, defaultValue float64, validateFns ...func(float64) error, + class Class, key, desc string, defaultValue float64, validateFns ...func(float64) error, ) *FloatSetting { var validateFn func(float64) error if len(validateFns) > 0 { @@ -143,7 +134,7 @@ func RegisterFloatSetting( defaultValue: defaultValue, validateFn: validateFn, } - register(key, desc, setting) + register(class, key, desc, setting) return setting } diff --git a/pkg/settings/int.go b/pkg/settings/int.go index 17fe34d4b9f7..1fd73e71eaa1 100644 --- a/pkg/settings/int.go +++ b/pkg/settings/int.go @@ -103,7 +103,7 @@ func (i *IntSetting) setToDefault(ctx context.Context, sv *Values) { // RegisterIntSetting defines a new setting with type int with a // validation function. func RegisterIntSetting( - key, desc string, defaultValue int64, validateFns ...func(int64) error, + class Class, key, desc string, defaultValue int64, validateFns ...func(int64) error, ) *IntSetting { var composed func(int64) error if len(validateFns) > 0 { @@ -125,7 +125,7 @@ func RegisterIntSetting( defaultValue: defaultValue, validateFn: composed, } - register(key, desc, setting) + register(class, key, desc, setting) return setting } @@ -135,15 +135,6 @@ func (i *IntSetting) WithPublic() *IntSetting { return i } -// WithSystemOnly system-only usage and can be chained.
-func (i *IntSetting) WithSystemOnly() *IntSetting { - i.common.systemOnly = true - return i -} - -// Defeat the linter. -var _ = (*IntSetting).WithSystemOnly - // PositiveInt can be passed to RegisterIntSetting func PositiveInt(v int64) error { if v < 1 { diff --git a/pkg/settings/masked.go b/pkg/settings/masked.go index bb6641d556ac..9c68f7de5faf 100644 --- a/pkg/settings/masked.go +++ b/pkg/settings/masked.go @@ -48,9 +48,9 @@ func (s *MaskedSetting) Typ() string { return s.setting.Typ() } -// SystemOnly returns the underlying setting's SystemOnly. -func (s *MaskedSetting) SystemOnly() bool { - return s.setting.SystemOnly() +// Class returns the class for the underlying setting. +func (s *MaskedSetting) Class() Class { + return s.setting.Class() } // TestingIsReportable is used in testing for reportability. diff --git a/pkg/settings/registry.go b/pkg/settings/registry.go index 697be27dfca0..74b695c9b5a9 100644 --- a/pkg/settings/registry.go +++ b/pkg/settings/registry.go @@ -117,7 +117,7 @@ var retiredSettings = map[string]struct{}{ } // register adds a setting to the registry. -func register(key, desc string, s internalSetting) { +func register(class Class, key, desc string, s internalSetting) { if _, ok := retiredSettings[key]; ok { panic(fmt.Sprintf("cannot reuse previously defined setting name: %s", key)) } @@ -142,9 +142,9 @@ func register(key, desc string, s internalSetting) { )) } } - s.setDescription(desc) registry[key] = s - s.setSlotIdx(len(registry)) + slotIdx := len(registry) + s.init(class, slotIdx, desc) } // NumRegisteredSettings returns the number of registered settings. diff --git a/pkg/settings/setting.go b/pkg/settings/setting.go index 72b37111796c..7f8c29751320 100644 --- a/pkg/settings/setting.go +++ b/pkg/settings/setting.go @@ -24,20 +24,23 @@ import ( type Setting interface { // Typ returns the short (1 char) string denoting the type of setting. Typ() string + // String returns the string representation of the setting's current value. // It's used when materializing results for `SHOW CLUSTER SETTINGS` or `SHOW // CLUSTER SETTING `. String(sv *Values) string + // Description contains a helpful text explaining what the specific cluster // setting is for. Description() string - // Visibility controls whether or not the setting is made publicly visible. - // Reserved settings are still accessible to users, but they don't get - // listed out when retrieving all settings. + + // Visibility returns whether or not the setting is made publicly visible. + // Reserved settings are still accessible to users, but they don't get listed + // out when retrieving all settings. Visibility() Visibility - // SystemOnly indicates if a setting is only applicable to the system tenant. - SystemOnly() bool + // Class returns the scope of the setting in multi-tenant scenarios. + Class() Class } // NonMaskedSetting is the exported interface of non-masked settings. @@ -47,22 +50,66 @@ type NonMaskedSetting interface { // Encoded returns the encoded representation of the current value of the // setting. Encoded(sv *Values) string + // EncodedDefault returns the encoded representation of the default value of // the setting. EncodedDefault() string + // SetOnChange installs a callback to be called when a setting's value // changes. `fn` should avoid doing long-running or blocking work as it is // called on the goroutine which handles all settings updates. 
SetOnChange(sv *Values, fn func(ctx context.Context)) + // ErrorHint returns a hint message to be displayed to the user when there's // an error. ErrorHint() (bool, string) } -// Visibility describes how a user should feel confident that -// they can customize the setting. See the constant definitions below -// for details. -type Visibility int +// Class describes the scope of a setting in multi-tenant scenarios. While all +// settings can be used on the system tenant, the classes restrict use on +// non-system tenants. +// +// The class is passed as the first argument when registering a setting, e.g. +// RegisterIntSetting(SystemOnly, ...). +// +// Guidelines for choosing a class: +// - Make sure to read the descriptions below carefully to understand the +// differences in semantics. +// +// - If the setting controls a user-visible aspect of SQL, it should be a +// TenantWritable setting. +// +// - Control settings relevant to tenant-specific internal implementation +// should be TenantReadOnly. +// +// - When in doubt, the first choice to consider should be TenantWritable. +// +// - SystemOnly should be used with caution: even internal tenant code is +// disallowed from using these settings at all. +type Class int8 + +const ( + // SystemOnly settings are associated with single-tenant clusters and host + // clusters. Settings with this class do not exist on non-system tenants and + // can only be used by the system tenant. + SystemOnly Class = iota + + // TenantReadOnly settings are visible to non-system tenants but cannot be + // modified by the tenant. Values for these settings are set from the system + // tenant and propagated from the host cluster. + TenantReadOnly + + // TenantWritable settings are visible to and can be modified by non-system + // tenants. The system can still override these settings; the overrides are + // propagated from the host cluster. + TenantWritable +)
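To make the three classes concrete, a minimal sketch of registrations under the new API (the keys, descriptions, and defaults here are illustrative only and do not exist in the codebase):

var exampleSystem = settings.RegisterBoolSetting(
	settings.SystemOnly, "example.host_only", "usable by the system tenant only", false)

var exampleReadOnly = settings.RegisterDurationSetting(
	settings.TenantReadOnly, "example.host_controlled", "visible to tenants, set from the host cluster", time.Minute)

var exampleWritable = settings.RegisterIntSetting(
	settings.TenantWritable, "example.tenant_tunable", "tenants may override this value themselves", 10)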
+// Visibility describes how a user should feel confident that they can customize +// the setting. +// +// See the constant definitions below for details. +type Visibility int8 const ( // Reserved - which is the default - indicates that a setting is diff --git a/pkg/settings/settings_test.go b/pkg/settings/settings_test.go index 2a9ebfe20a3b..9bf27b1cd4f1 100644 --- a/pkg/settings/settings_test.go +++ b/pkg/settings/settings_test.go @@ -146,26 +146,30 @@ var changes = struct { byteSize int }{} -var boolTA = settings.RegisterBoolSetting("bool.t", "desc", true) -var boolFA = settings.RegisterBoolSetting("bool.f", "desc", false) -var strFooA = settings.RegisterStringSetting("str.foo", "desc", "") -var strBarA = settings.RegisterStringSetting("str.bar", "desc", "bar") -var i1A = settings.RegisterIntSetting("i.1", "desc", 0) -var i2A = settings.RegisterIntSetting("i.2", "desc", 5) -var fA = settings.RegisterFloatSetting("f", "desc", 5.4) -var dA = settings.RegisterDurationSetting("d", "desc", time.Second) -var duA = settings.RegisterPublicDurationSettingWithExplicitUnit("d_with_explicit_unit", "desc", time.Second, settings.NonNegativeDuration) -var _ = settings.RegisterDurationSetting("d_with_maximum", "desc", time.Second, settings.NonNegativeDurationWithMaximum(time.Hour)) -var eA = settings.RegisterEnumSetting("e", "desc", "foo", map[int64]string{1: "foo", 2: "bar", 3: "baz"}) -var byteSize = settings.RegisterByteSizeSetting("zzz", "desc", mb) -var mA = settings.TestingRegisterVersionSetting("v.1", "desc", &dummyVersionSettingImpl{}) +var boolTA = settings.RegisterBoolSetting(settings.SystemOnly, "bool.t", "desc", true) +var boolFA = settings.RegisterBoolSetting(settings.TenantReadOnly, "bool.f", "desc", false) +var strFooA = settings.RegisterStringSetting(settings.TenantWritable, "str.foo", "desc", "") +var strBarA = settings.RegisterStringSetting(settings.SystemOnly, "str.bar", "desc", "bar") +var i1A = settings.RegisterIntSetting(settings.TenantWritable, "i.1", "desc", 0) +var i2A = settings.RegisterIntSetting(settings.TenantWritable, "i.2", "desc", 5) +var fA = settings.RegisterFloatSetting(settings.TenantWritable, "f", "desc", 5.4) +var dA = settings.RegisterDurationSetting(settings.TenantWritable, "d", "desc", time.Second) +var duA = settings.RegisterPublicDurationSettingWithExplicitUnit(settings.TenantWritable, "d_with_explicit_unit", "desc", time.Second, settings.NonNegativeDuration) +var _ = settings.RegisterDurationSetting(settings.TenantWritable, "d_with_maximum", "desc", time.Second, settings.NonNegativeDurationWithMaximum(time.Hour)) +var eA = settings.RegisterEnumSetting(settings.TenantWritable, "e", "desc", "foo", map[int64]string{1: "foo", 2: "bar", 3: "baz"}) +var byteSize = settings.RegisterByteSizeSetting(settings.TenantWritable, "zzz", "desc", mb) +var mA = func() *settings.VersionSetting { + s := settings.MakeVersionSetting(&dummyVersionSettingImpl{}) + settings.RegisterVersionSetting(settings.SystemOnly, "v.1", "desc", &s) + return &s +}() func init() { - settings.RegisterBoolSetting("sekretz", "desc", false).SetReportable(false) - settings.RegisterBoolSetting("rezervedz", "desc", false).SetVisibility(settings.Reserved) + settings.RegisterBoolSetting(settings.SystemOnly, "sekretz", "desc", false).SetReportable(false) + settings.RegisterBoolSetting(settings.SystemOnly, "rezervedz", "desc", false).SetVisibility(settings.Reserved) } -var strVal = settings.RegisterValidatedStringSetting( +var strVal = settings.RegisterValidatedStringSetting(settings.SystemOnly, "str.val", "desc", "", func(sv *settings.Values, v string) error { for _, c := range v { if !unicode.IsLetter(c) { @@ -174,16 +178,16 @@ var strVal = 
settings.RegisterValidatedStringSetting( } return nil }) -var dVal = settings.RegisterDurationSetting("dVal", "desc", time.Second, settings.NonNegativeDuration) -var fVal = settings.RegisterFloatSetting("fVal", "desc", 5.4, settings.NonNegativeFloat) -var byteSizeVal = settings.RegisterByteSizeSetting( +var dVal = settings.RegisterDurationSetting(settings.SystemOnly, "dVal", "desc", time.Second, settings.NonNegativeDuration) +var fVal = settings.RegisterFloatSetting(settings.SystemOnly, "fVal", "desc", 5.4, settings.NonNegativeFloat) +var byteSizeVal = settings.RegisterByteSizeSetting(settings.SystemOnly, "byteSize.Val", "desc", mb, func(v int64) error { if v < 0 { return errors.Errorf("bytesize cannot be negative") } return nil }) -var iVal = settings.RegisterIntSetting( +var iVal = settings.RegisterIntSetting(settings.SystemOnly, "i.Val", "desc", 0, func(v int64) error { if v < 0 { return errors.Errorf("int cannot be negative") @@ -228,7 +232,9 @@ func TestCache(t *testing.T) { t.Run("VersionSetting", func(t *testing.T) { u := settings.NewUpdater(sv) - mB := settings.TestingRegisterVersionSetting("local.m", "foo", &dummyVersionSettingImpl{}) + v := settings.MakeVersionSetting(&dummyVersionSettingImpl{}) + mB := &v + settings.RegisterVersionSetting(settings.SystemOnly, "local.m", "foo", mB) // Version settings don't have defaults, so we need to start by setting // it to something. defaultDummyV := dummyVersion{msg1: "default", growsbyone: "X"} @@ -738,15 +744,15 @@ func batchRegisterSettings(t *testing.T, keyPrefix string, count int) (name stri }() for i := 0; i < count; i++ { name = fmt.Sprintf("%s_%3d", keyPrefix, i) - settings.RegisterIntSetting(name, "desc", 0) + settings.RegisterIntSetting(settings.SystemOnly, name, "desc", 0) } return name, err } -var overrideBool = settings.RegisterBoolSetting("override.bool", "desc", true) -var overrideInt = settings.RegisterIntSetting("override.int", "desc", 0) -var overrideDuration = settings.RegisterDurationSetting("override.duration", "desc", time.Second) -var overrideFloat = settings.RegisterFloatSetting("override.float", "desc", 1.0) +var overrideBool = settings.RegisterBoolSetting(settings.SystemOnly, "override.bool", "desc", true) +var overrideInt = settings.RegisterIntSetting(settings.TenantReadOnly, "override.int", "desc", 0) +var overrideDuration = settings.RegisterDurationSetting(settings.TenantWritable, "override.duration", "desc", time.Second) +var overrideFloat = settings.RegisterFloatSetting(settings.TenantWritable, "override.float", "desc", 1.0) func TestOverride(t *testing.T) { ctx := context.Background() diff --git a/pkg/settings/string.go b/pkg/settings/string.go index 2924bb24ab23..2080f44c0e19 100644 --- a/pkg/settings/string.go +++ b/pkg/settings/string.go @@ -102,14 +102,14 @@ func (s *StringSetting) WithPublic() *StringSetting { } // RegisterStringSetting defines a new setting with type string. -func RegisterStringSetting(key, desc string, defaultValue string) *StringSetting { - return RegisterValidatedStringSetting(key, desc, defaultValue, nil) +func RegisterStringSetting(class Class, key, desc string, defaultValue string) *StringSetting { + return RegisterValidatedStringSetting(class, key, desc, defaultValue, nil) } // RegisterValidatedStringSetting defines a new setting with type string with a // validation function. 
func RegisterValidatedStringSetting( - key, desc string, defaultValue string, validateFn func(*Values, string) error, + class Class, key, desc string, defaultValue string, validateFn func(*Values, string) error, ) *StringSetting { if validateFn != nil { if err := validateFn(nil, defaultValue); err != nil { @@ -124,6 +124,6 @@ func RegisterValidatedStringSetting( // PII and are thus non-reportable (to exclude them from telemetry // reports). setting.SetReportable(false) - register(key, desc, setting) + register(class, key, desc, setting) return setting } diff --git a/pkg/settings/version.go b/pkg/settings/version.go index b238be5ccf0d..0c02f21ac662 100644 --- a/pkg/settings/version.go +++ b/pkg/settings/version.go @@ -165,14 +165,6 @@ func (v *VersionSetting) setToDefault(ctx context.Context, sv *Values) {} // RegisterVersionSetting adds the provided version setting to the global // registry. -func RegisterVersionSetting(key, desc string, setting *VersionSetting) { - register(key, desc, setting) -} - -// TestingRegisterVersionSetting is like RegisterVersionSetting, -// but it takes a VersionSettingImpl. -func TestingRegisterVersionSetting(key, desc string, impl VersionSettingImpl) *VersionSetting { - setting := MakeVersionSetting(impl) - register(key, desc, &setting) - return &setting +func RegisterVersionSetting(class Class, key, desc string, setting *VersionSetting) { + register(class, key, desc, setting) } diff --git a/pkg/spanconfig/spanconfigmanager/manager.go b/pkg/spanconfig/spanconfigmanager/manager.go index 42f4e7cd26fd..f691fbd00cc7 100644 --- a/pkg/spanconfig/spanconfigmanager/manager.go +++ b/pkg/spanconfig/spanconfigmanager/manager.go @@ -34,6 +34,7 @@ import ( // spanconfig.experimental_reconciliation.enabled is configured. For host // tenants, COCKROACH_EXPERIMENTAL_SPAN_CONFIGS needs to be additionally set. var checkReconciliationJobInterval = settings.RegisterDurationSetting( + settings.TenantWritable, "spanconfig.experimental_reconciliation_job.check_interval", "the frequency at which to check if the span config reconciliation job exists (and to start it if not)", 10*time.Minute, @@ -45,6 +46,7 @@ var checkReconciliationJobInterval = settings.RegisterDurationSetting( // For the host tenant it has no effect unless // COCKROACH_EXPERIMENTAL_SPAN_CONFIGS is also set. var jobEnabledSetting = settings.RegisterBoolSetting( + settings.TenantWritable, "spanconfig.experimental_reconciliation_job.enabled", "enable the use of the kv accessor", false) diff --git a/pkg/spanconfig/spanconfigstore/store.go b/pkg/spanconfig/spanconfigstore/store.go index 8a226ae5fc50..1aa417b7d9b6 100644 --- a/pkg/spanconfig/spanconfigstore/store.go +++ b/pkg/spanconfig/spanconfigstore/store.go @@ -31,10 +31,11 @@ import ( // infrastructure. It has no effect unless COCKROACH_EXPERIMENTAL_SPAN_CONFIGS // is set. var EnabledSetting = settings.RegisterBoolSetting( + settings.SystemOnly, "spanconfig.experimental_store.enabled", `use the span config infrastructure in KV instead of the system config span`, false, -).WithSystemOnly() +) // Store is an in-memory data structure to store and retrieve span configs. 
// Internally it makes use of an interval tree to store non-overlapping span diff --git a/pkg/sql/alter_database.go b/pkg/sql/alter_database.go index 37bcb19a8929..7745bebd17a6 100644 --- a/pkg/sql/alter_database.go +++ b/pkg/sql/alter_database.go @@ -286,6 +286,7 @@ type alterDatabaseDropRegionNode struct { } var allowDropFinalRegion = settings.RegisterBoolSetting( + settings.TenantWritable, "sql.multiregion.drop_primary_region.enabled", "allows dropping the PRIMARY REGION of a database if it is the last region", true, diff --git a/pkg/sql/backfill.go b/pkg/sql/backfill.go index 9f40832c257e..2fa64b94cb7d 100644 --- a/pkg/sql/backfill.go +++ b/pkg/sql/backfill.go @@ -77,6 +77,7 @@ const ( // entries for before we attempt to fill in a single index batch before queueing // it up for ingestion and progress reporting in the index backfiller processor. var indexBackfillBatchSize = settings.RegisterIntSetting( + settings.TenantWritable, "bulkio.index_backfill.batch_size", "the number of rows for which we construct index entries in a single batch", 50000, @@ -85,6 +86,7 @@ var indexBackfillBatchSize = settings.RegisterIntSetting( // indexBackfillCheckpointInterval is the duration between backfill detail updates. var indexBackfillCheckpointInterval = settings.RegisterDurationSetting( + settings.TenantWritable, "bulkio.index_backfill.checkpoint_interval", "the amount of time between index backfill checkpoint updates", 30*time.Second, @@ -94,6 +96,7 @@ var indexBackfillCheckpointInterval = settings.RegisterDurationSetting( // columnBackfillBatchSize is the maximum number of rows we update at once when // adding or removing columns. var columnBackfillBatchSize = settings.RegisterIntSetting( + settings.TenantWritable, "bulkio.column_backfill.batch_size", "the number of rows updated at a time to add/remove columns", 200, diff --git a/pkg/sql/backfill/backfill.go b/pkg/sql/backfill/backfill.go index eab39f7563bf..aaf990649c58 100644 --- a/pkg/sql/backfill/backfill.go +++ b/pkg/sql/backfill/backfill.go @@ -485,8 +485,10 @@ func (ib *IndexBackfiller) InitForLocalUse( } // constructExprs is a helper to construct the index and column expressions -// required for an index backfill. It also returns the set of columns referenced -// by any of these exprs. +// required for an index backfill. It also returns the set of non-virtual +// columns referenced by any of these exprs that should be fetched from the +// primary index. Virtual columns are not included because they don't exist in +// the primary index. // // The cols argument is the full set of cols in the table (including those being // added). The addedCols argument is the set of non-public, non-computed @@ -554,21 +556,34 @@ func constructExprs( colExprs[id] = computedExprs[i] } - // Ensure that only existing columns are added to the needed set. Otherwise - // the fetcher may complain that the columns don't exist. There's a somewhat - // subtle invariant that if any dependencies exist between computed columns - // and default values that the computed column be a later column and thus the - // default value will have been populated. Computed columns are not permitted - // to reference each other. - addToReferencedColumns := func(cols catalog.TableColSet) { - cols.ForEach(func(col descpb.ColumnID) { - if !addedColSet.Contains(col) { - referencedColumns.Add(col) + // Ensure that only existing, non-virtual columns are added to the needed + // set. Otherwise the fetcher may complain that the columns don't exist. 
+ // There's a somewhat subtle invariant: if any dependencies exist + // between computed columns and default values, the computed column must be + // a later column, and thus the default value will already have been + // populated. + // Computed columns are not permitted to reference each other. + addToReferencedColumns := func(cols catalog.TableColSet) error { + for colID, ok := cols.Next(0); ok; colID, ok = cols.Next(colID + 1) { + if addedColSet.Contains(colID) { + continue + } + col, err := desc.FindColumnWithID(colID) + if err != nil { + return errors.AssertionFailedf("column %d does not exist", colID) } - }) + if col.IsVirtual() { + continue + } + referencedColumns.Add(colID) + } + return nil + } + if err := addToReferencedColumns(predicateRefColIDs); err != nil { + return nil, nil, catalog.TableColSet{}, err + } + if err := addToReferencedColumns(computedExprRefColIDs); err != nil { + return nil, nil, catalog.TableColSet{}, err } - addToReferencedColumns(predicateRefColIDs) - addToReferencedColumns(computedExprRefColIDs) return predicates, colExprs, referencedColumns, nil } diff --git a/pkg/sql/catalog/descs/collection.go b/pkg/sql/catalog/descs/collection.go index c36e10895e27..f523331a77fb 100644 --- a/pkg/sql/catalog/descs/collection.go +++ b/pkg/sql/catalog/descs/collection.go @@ -203,6 +203,7 @@ func (tc *Collection) AddUncommittedDescriptor(desc catalog.MutableDescriptor) e // ValidateOnWriteEnabled is the cluster setting used to enable or disable // validating descriptors prior to writing. var ValidateOnWriteEnabled = settings.RegisterBoolSetting( + settings.TenantWritable, "sql.catalog.descs.validate_on_write.enabled", "set to true to validate descriptors prior to writing, false to disable; default is true", true, /* defaultValue */ diff --git a/pkg/sql/catalog/hydratedtables/hydratedcache.go b/pkg/sql/catalog/hydratedtables/hydratedcache.go index 7e6288a71212..194150d7ba31 100644 --- a/pkg/sql/catalog/hydratedtables/hydratedcache.go +++ b/pkg/sql/catalog/hydratedtables/hydratedcache.go @@ -98,6 +98,7 @@ var ( // CacheSize controls the size of the LRU cache. var CacheSize = settings.RegisterIntSetting( + settings.TenantWritable, "sql.catalog.hydrated_tables.cache_size", "number of table descriptor versions retained in the hydratedtables LRU cache", 128, diff --git a/pkg/sql/catalog/lease/lease.go b/pkg/sql/catalog/lease/lease.go index e84d0ac372c5..4366682831f4 100644 --- a/pkg/sql/catalog/lease/lease.go +++ b/pkg/sql/catalog/lease/lease.go @@ -55,6 +55,7 @@ var errReadOlderVersion = errors.New("read older descriptor version from store") // LeaseDuration controls the duration of sql descriptor leases. var LeaseDuration = settings.RegisterDurationSetting( + settings.TenantWritable, "sql.catalog.descriptor_lease_duration", "mean duration of sql descriptor leases, the actual duration is jittered", base.DefaultDescriptorLeaseDuration) @@ -68,6 +69,7 @@ func between0and1inclusive(f float64) error { // LeaseJitterFraction controls the percent jitter around sql lease durations var LeaseJitterFraction = settings.RegisterFloatSetting( + settings.TenantWritable, "sql.catalog.descriptor_lease_jitter_fraction", "the fraction by which the actual sql descriptor lease duration is jittered around the mean", base.DefaultDescriptorLeaseJitterFraction, @@ -1135,6 +1137,7 @@ func (m *Manager) watchForUpdates(ctx context.Context, descUpdateCh chan<- *desc
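A note on how the two lease settings above interact: the duration setting gives the mean lease lifetime, and the jitter fraction spreads actual lifetimes around that mean so that leases do not all expire at once. A sketch of the computation, assuming uniform jitter (the function name and exact formula are illustrative, not the lease manager's code):

package leasedemo

import (
	"math/rand"
	"time"
)

// jitteredLeaseDuration draws a lease lifetime uniformly from
// [mean*(1-fraction), mean*(1+fraction)].
func jitteredLeaseDuration(mean time.Duration, fraction float64, rng *rand.Rand) time.Duration {
	factor := 1 - fraction + 2*fraction*rng.Float64()
	return time.Duration(float64(mean) * factor)
}

// leaseRefreshLimit is the upper-limit on the number of descriptor leases // that will continuously have their lease refreshed.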
var leaseRefreshLimit = settings.RegisterIntSetting( + settings.TenantWritable, "sql.tablecache.lease.refresh_limit", "maximum number of descriptors to periodically refresh leases for", 500, diff --git a/pkg/sql/catalog/lease/storage.go b/pkg/sql/catalog/lease/storage.go index 7227a33b630b..ee38f52f6848 100644 --- a/pkg/sql/catalog/lease/storage.go +++ b/pkg/sql/catalog/lease/storage.go @@ -62,6 +62,7 @@ type storage struct { // LeaseRenewalDuration controls the default time before a lease expires when // acquisition to renew the lease begins. var LeaseRenewalDuration = settings.RegisterDurationSetting( + settings.TenantWritable, "sql.catalog.descriptor_lease_renewal_fraction", "controls the default time before a lease expires when acquisition to renew the lease begins", base.DefaultDescriptorLeaseRenewalTimeout) diff --git a/pkg/sql/colexec/external_hash_aggregator.go b/pkg/sql/colexec/external_hash_aggregator.go index fb81354cc664..4e7ce8b8ac75 100644 --- a/pkg/sql/colexec/external_hash_aggregator.go +++ b/pkg/sql/colexec/external_hash_aggregator.go @@ -104,6 +104,7 @@ const HashAggregationDiskSpillingEnabledSettingName = "sql.distsql.temp_storage. // HashAggregationDiskSpillingEnabled is a cluster setting that allows to // disable hash aggregator disk spilling. var HashAggregationDiskSpillingEnabled = settings.RegisterBoolSetting( + settings.TenantWritable, HashAggregationDiskSpillingEnabledSettingName, "set to false to disable hash aggregator disk spilling "+ "(this will improve performance, but the query might hit the memory limit)", diff --git a/pkg/sql/crdb_internal.go b/pkg/sql/crdb_internal.go index efa24c826d43..e01589781c12 100644 --- a/pkg/sql/crdb_internal.go +++ b/pkg/sql/crdb_internal.go @@ -476,6 +476,7 @@ CREATE TABLE crdb_internal.tables ( // is used to define the AS OF time for querying the system.table_statistics // table when building crdb_internal.table_row_statistics. var statsAsOfTimeClusterMode = settings.RegisterDurationSetting( + settings.TenantWritable, "sql.crdb_internal.table_row_statistics.as_of_time", "historical query time used to build the crdb_internal.table_row_statistics table", -10*time.Second, diff --git a/pkg/sql/create_stats.go b/pkg/sql/create_stats.go index e04f713a72c9..507752306cd1 100644 --- a/pkg/sql/create_stats.go +++ b/pkg/sql/create_stats.go @@ -46,6 +46,7 @@ import ( // createStatsPostEvents controls the cluster setting for logging // automatic table statistics collection to the event log. var createStatsPostEvents = settings.RegisterBoolSetting( + settings.TenantWritable, "sql.stats.post_events.enabled", "if set, an event is logged for every CREATE STATISTICS job", false, @@ -54,6 +55,7 @@ var createStatsPostEvents = settings.RegisterBoolSetting( // featureStatsEnabled is used to enable and disable the CREATE STATISTICS and // ANALYZE features. var featureStatsEnabled = settings.RegisterBoolSetting( + settings.TenantWritable, "feature.stats.enabled", "set to true to enable CREATE STATISTICS/ANALYZE, false to disable; default is true", featureflag.FeatureFlagEnabledDefault, diff --git a/pkg/sql/delegate/show_tables.go b/pkg/sql/delegate/show_tables.go index 8b6aa60d73f9..3c7926b1d6d7 100644 --- a/pkg/sql/delegate/show_tables.go +++ b/pkg/sql/delegate/show_tables.go @@ -21,6 +21,7 @@ import ( ) var showEstimatedRowCountClusterSetting = settings.RegisterBoolSetting( + settings.TenantWritable, "sql.show_tables.estimated_row_count.enabled", "whether the estimated_row_count is shown on SHOW TABLES. 
Turning this off "+ "will improve SHOW TABLES performance.", diff --git a/pkg/sql/descriptor.go b/pkg/sql/descriptor.go index 65f5708c64df..38586e3cd3b1 100644 --- a/pkg/sql/descriptor.go +++ b/pkg/sql/descriptor.go @@ -356,6 +356,7 @@ const DefaultPrimaryRegionClusterSettingName = "sql.defaults.primary_region" // DefaultPrimaryRegion is a cluster setting that contains the default primary region. var DefaultPrimaryRegion = settings.RegisterStringSetting( + settings.TenantWritable, DefaultPrimaryRegionClusterSettingName, `if not empty, all databases created without a PRIMARY REGION will `+ `implicitly have the given PRIMARY REGION`, diff --git a/pkg/sql/distsql_physical_planner.go b/pkg/sql/distsql_physical_planner.go index 8f31fd77dcc9..5233131c2fc4 100644 --- a/pkg/sql/distsql_physical_planner.go +++ b/pkg/sql/distsql_physical_planner.go @@ -1357,6 +1357,7 @@ const defaultLocalScansConcurrencyLimit = 1024 // "additional" we mean having more processors than one in the same stage of the // physical plan. var localScansConcurrencyLimit = settings.RegisterIntSetting( + settings.TenantWritable, "sql.local_scans.concurrency_limit", "maximum number of additional goroutines for performing scans in local plans", defaultLocalScansConcurrencyLimit, diff --git a/pkg/sql/distsql_plan_stats.go b/pkg/sql/distsql_plan_stats.go index a3270952385e..af8cbc5d9f3e 100644 --- a/pkg/sql/distsql_plan_stats.go +++ b/pkg/sql/distsql_plan_stats.go @@ -47,6 +47,7 @@ const histogramSamples = 10000 // The lowest TTL we recommend is 10 minutes. This value must be lower than // that. var maxTimestampAge = settings.RegisterDurationSetting( + settings.TenantWritable, "sql.stats.max_timestamp_age", "maximum age of timestamp during table statistics collection", 5*time.Minute, diff --git a/pkg/sql/event_log.go b/pkg/sql/event_log.go index a882369c7b8d..442f75145788 100644 --- a/pkg/sql/event_log.go +++ b/pkg/sql/event_log.go @@ -374,6 +374,7 @@ func LogEventForJobs( } var eventLogSystemTableEnabled = settings.RegisterBoolSetting( + settings.TenantWritable, "server.eventlog.enabled", "if set, logged notable events are also stored in the table system.eventlog", true, diff --git a/pkg/sql/exec_log.go b/pkg/sql/exec_log.go index 5befb350b5df..ff15331ddae5 100644 --- a/pkg/sql/exec_log.go +++ b/pkg/sql/exec_log.go @@ -66,12 +66,14 @@ import ( // logStatementsExecuteEnabled causes the Executor to log executed // statements and, if any, resulting errors. var logStatementsExecuteEnabled = settings.RegisterBoolSetting( + settings.TenantWritable, "sql.trace.log_statement_execute", "set to true to enable logging of executed statements", false, ).WithPublic() var slowQueryLogThreshold = settings.RegisterPublicDurationSettingWithExplicitUnit( + settings.TenantWritable, "sql.log.slow_query.latency_threshold", "when set to non-zero, log statements whose service latency exceeds "+ "the threshold to a secondary logger on each node", @@ -80,6 +82,7 @@ var slowQueryLogThreshold = settings.RegisterPublicDurationSettingWithExplicitUn ) var slowInternalQueryLogEnabled = settings.RegisterBoolSetting( + settings.TenantWritable, "sql.log.slow_query.internal_queries.enabled", "when set to true, internal queries which exceed the slow query log threshold "+ "are logged to a separate log. 
Must have the slow query log enabled for this "+ @@ -88,6 +91,7 @@ var slowInternalQueryLogEnabled = settings.RegisterBoolSetting( ).WithPublic() var slowQueryLogFullTableScans = settings.RegisterBoolSetting( + settings.TenantWritable, "sql.log.slow_query.experimental_full_table_scans.enabled", "when set to true, statements that perform a full table/index scan will be logged to the "+ "slow query log even if they do not meet the latency threshold. Must have the slow query "+ @@ -96,18 +100,21 @@ ).WithPublic() var unstructuredQueryLog = settings.RegisterBoolSetting( + settings.TenantWritable, "sql.log.unstructured_entries.enabled", "when set, SQL execution and audit logs use the pre-v21.1 unstructured format", false, ) var adminAuditLogEnabled = settings.RegisterBoolSetting( + settings.TenantWritable, "sql.log.admin_audit.enabled", "when set, log SQL queries that are executed by a user with admin privileges", false, ) var telemetryLoggingEnabled = settings.RegisterBoolSetting( + settings.TenantWritable, "sql.telemetry.query_sampling.enabled", "when set to true, executed queries will emit an event on the telemetry logging channel", // Note: Usage of an env var here makes it possible to set a default without diff --git a/pkg/sql/exec_util.go b/pkg/sql/exec_util.go index d071a85aa219..7cb3ee28fa53 100644 --- a/pkg/sql/exec_util.go +++ b/pkg/sql/exec_util.go @@ -104,6 +104,7 @@ import ( // ClusterOrganization is the organization name. var ClusterOrganization = settings.RegisterStringSetting( + settings.TenantWritable, "cluster.organization", "organization name", "", @@ -119,6 +120,7 @@ func ClusterIsInternal(sv *settings.Values) bool { // non-reportable. var ClusterSecret = func() *settings.StringSetting { s := settings.RegisterStringSetting( + settings.TenantWritable, "cluster.secret", "cluster specific secret", "", @@ -135,6 +137,7 @@ var ClusterSecret = func() *settings.StringSetting { // TODO(bob): Remove or no-op this in v2.4: https://github.com/cockroachdb/cockroach/issues/32844 var defaultIntSize = func() *settings.IntSetting { s := settings.RegisterIntSetting( + settings.TenantWritable, "sql.defaults.default_int_size", "the size, in bytes, of an INT type", 8, func(i int64) error { if i != 4 && i != 8 { @@ -149,6 +152,7 @@ const allowCrossDatabaseFKsSetting = "sql.cross_db_fks.enabled" var allowCrossDatabaseFKs = settings.RegisterBoolSetting( + settings.TenantWritable, allowCrossDatabaseFKsSetting, "if true, creating foreign key references across databases is allowed", false, @@ -157,6 +161,7 @@ const allowCrossDatabaseViewsSetting = "sql.cross_db_views.enabled" var allowCrossDatabaseViews = settings.RegisterBoolSetting( + settings.TenantWritable, allowCrossDatabaseViewsSetting, "if true, creating views that refer to other databases is allowed", false, @@ -165,6 +170,7 @@ const allowCrossDatabaseSeqOwnerSetting = "sql.cross_db_sequence_owners.enabled" var allowCrossDatabaseSeqOwner = settings.RegisterBoolSetting( + settings.TenantWritable, allowCrossDatabaseSeqOwnerSetting, "if true, creating sequences owned by tables from other databases is allowed", false, @@ -173,6 +179,7 @@ const allowCrossDatabaseSeqReferencesSetting = "sql.cross_db_sequence_references.enabled" var
allowCrossDatabaseSeqReferences = settings.RegisterBoolSetting( + settings.TenantWritable, allowCrossDatabaseSeqReferencesSetting, "if true, sequences referenced by tables from other databases are allowed", false, @@ -185,6 +192,7 @@ const secondaryTenantsZoneConfigsEnabledSettingName = "sql.zone_configs.experime // // This setting has no effect on zone configurations that have already been set. var secondaryTenantZoneConfigsEnabled = settings.RegisterBoolSetting( + settings.TenantWritable, secondaryTenantsZoneConfigsEnabledSettingName, "allow secondary tenants to set zone configurations; does not affect the system tenant", false, @@ -198,6 +206,7 @@ var secondaryTenantZoneConfigsEnabled = settings.RegisterBoolSetting( // all execution because traces are gathered for all transactions even // if they are not output. var traceTxnThreshold = settings.RegisterDurationSetting( + settings.TenantWritable, "sql.trace.txn.enable_threshold", "duration beyond which all transactions are traced (set to 0 to "+ "disable). This setting is coarser grained than"+ @@ -210,6 +219,7 @@ var traceTxnThreshold = settings.RegisterDurationSetting( // to be able to reduce the noise associated with a larger transaction (e.g. // round trips to client). var traceStmtThreshold = settings.RegisterDurationSetting( + settings.TenantWritable, "sql.trace.stmt.enable_threshold", "duration beyond which all statements are traced (set to 0 to disable). "+ "This applies to individual statements within a transaction and is therefore "+ @@ -222,6 +232,7 @@ var traceStmtThreshold = settings.RegisterDurationSetting( // non-trivial performance impact and also reveals SQL statements // which may be a privacy concern. var traceSessionEventLogEnabled = settings.RegisterBoolSetting( + settings.TenantWritable, "sql.trace.session_eventlog.enabled", "set to true to enable session tracing. "+ "Note that enabling this may have a non-trivial negative performance impact.", @@ -235,6 +246,7 @@ const ReorderJoinsLimitClusterSettingName = "sql.defaults.reorder_joins_limit" // ReorderJoinsLimitClusterValue controls the cluster default for the maximum // number of joins reordered. 
var ReorderJoinsLimitClusterValue = settings.RegisterIntSetting( + settings.TenantWritable, ReorderJoinsLimitClusterSettingName, "default number of joins to reorder", opt.DefaultJoinOrderLimit, @@ -251,12 +263,14 @@ var ReorderJoinsLimitClusterValue = settings.RegisterIntSetting( ).WithPublic() var requireExplicitPrimaryKeysClusterMode = settings.RegisterBoolSetting( + settings.TenantWritable, "sql.defaults.require_explicit_primary_keys.enabled", "default value for requiring explicit primary keys in CREATE TABLE statements", false, ).WithPublic() var placementEnabledClusterMode = settings.RegisterBoolSetting( + settings.TenantWritable, "sql.defaults.multiregion_placement_policy.enabled", "default value for enable_multiregion_placement_policy;"+ " allows for use of PLACEMENT RESTRICTED", @@ -264,6 +278,7 @@ ) var autoRehomingEnabledClusterMode = settings.RegisterBoolSetting( + settings.TenantWritable, "sql.defaults.experimental_auto_rehoming.enabled", "default value for experimental_enable_auto_rehoming;"+ " allows for rows in REGIONAL BY ROW tables to be auto-rehomed on UPDATE", @@ -271,6 +286,7 @@ ).WithPublic() var onUpdateRehomeRowEnabledClusterMode = settings.RegisterBoolSetting( + settings.TenantWritable, "sql.defaults.on_update_rehome_row.enabled", "default value for on_update_rehome_row;"+ " enables ON UPDATE rehome_row() expressions to trigger on updates", @@ -278,18 +294,21 @@ ).WithPublic() var temporaryTablesEnabledClusterMode = settings.RegisterBoolSetting( + settings.TenantWritable, "sql.defaults.experimental_temporary_tables.enabled", "default value for experimental_enable_temp_tables; allows for use of temporary tables by default", false, ).WithPublic() var implicitColumnPartitioningEnabledClusterMode = settings.RegisterBoolSetting( + settings.TenantWritable, "sql.defaults.experimental_implicit_column_partitioning.enabled", "default value for experimental_enable_implicit_column_partitioning; allows for the use of implicit column partitioning", false, ).WithPublic() var overrideMultiRegionZoneConfigClusterMode = settings.RegisterBoolSetting( + settings.TenantWritable, "sql.defaults.override_multi_region_zone_config.enabled", "default value for override_multi_region_zone_config; "+ "allows for overriding the zone configs of a multi-region table or database", @@ -297,18 +316,21 @@ ).WithPublic() var hashShardedIndexesEnabledClusterMode = settings.RegisterBoolSetting( + settings.TenantWritable, "sql.defaults.experimental_hash_sharded_indexes.enabled", "default value for experimental_enable_hash_sharded_indexes; allows for creation of hash sharded indexes by default", false, ).WithPublic() var zigzagJoinClusterMode = settings.RegisterBoolSetting( + settings.TenantWritable, "sql.defaults.zigzag_join.enabled", "default value for enable_zigzag_join session setting; allows use of zig-zag join by default", true, ).WithPublic() var optDrivenFKCascadesClusterLimit = settings.RegisterIntSetting( + settings.TenantWritable, "sql.defaults.foreign_key_cascades_limit", "default value for foreign_key_cascades_limit session setting; limits the number of cascading operations that run as part of a single query", 10000, @@ -316,6 +338,7 @@ ).WithPublic()
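The sql.defaults.* settings in this file all follow the same pattern: each supplies the cluster-wide default for a like-named session variable, and an individual session's SET still takes precedence. A tiny sketch of that precedence rule (the names are invented for illustration, not the actual session-variable plumbing):

package sessiondemo

// effectiveValue returns the session's own value when the variable was SET
// explicitly, and otherwise falls back to the cluster-wide sql.defaults.*
// setting.
func effectiveValue(sessionOverride *int64, clusterDefault int64) int64 {
	if sessionOverride != nil {
		return *sessionOverride
	}
	return clusterDefault
}

var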
preferLookupJoinsForFKs = settings.RegisterBoolSetting( + settings.TenantWritable, "sql.defaults.prefer_lookup_joins_for_fks.enabled", "default value for prefer_lookup_joins_for_fks session setting; causes foreign key operations to use lookup joins when possible", false, @@ -328,6 +351,7 @@ var preferLookupJoinsForFKs = settings.RegisterBoolSetting( // haven't been collected. Collection of histograms is controlled by the // cluster setting sql.stats.histogram_collection.enabled. var optUseHistogramsClusterMode = settings.RegisterBoolSetting( + settings.TenantWritable, "sql.defaults.optimizer_use_histograms.enabled", "default value for optimizer_use_histograms session setting; enables usage of histograms in the optimizer by default", true, @@ -340,6 +364,7 @@ var optUseHistogramsClusterMode = settings.RegisterBoolSetting( // if they haven't been collected. Collection of multi-column stats is // controlled by the cluster setting sql.stats.multi_column_collection.enabled. var optUseMultiColStatsClusterMode = settings.RegisterBoolSetting( + settings.TenantWritable, "sql.defaults.optimizer_use_multicol_stats.enabled", "default value for optimizer_use_multicol_stats session setting; enables usage of multi-column stats in the optimizer by default", true, @@ -351,6 +376,7 @@ var optUseMultiColStatsClusterMode = settings.RegisterBoolSetting( // searched for matching rows before remote nodes, in the hope that the // execution engine can avoid visiting remote nodes. var localityOptimizedSearchMode = settings.RegisterBoolSetting( + settings.TenantWritable, "sql.defaults.locality_optimized_partitioned_index_scan.enabled", "default value for locality_optimized_partitioned_index_scan session setting; "+ "enables searching for rows in the current region before searching remote regions", @@ -358,18 +384,21 @@ ).WithPublic() var implicitSelectForUpdateClusterMode = settings.RegisterBoolSetting( + settings.TenantWritable, "sql.defaults.implicit_select_for_update.enabled", "default value for enable_implicit_select_for_update session setting; enables FOR UPDATE locking during the row-fetch phase of mutation statements", true, ).WithPublic() var insertFastPathClusterMode = settings.RegisterBoolSetting( + settings.TenantWritable, "sql.defaults.insert_fast_path.enabled", "default value for enable_insert_fast_path session setting; enables a specialized insert path", true, ).WithPublic() var experimentalAlterColumnTypeGeneralMode = settings.RegisterBoolSetting( + settings.TenantWritable, "sql.defaults.experimental_alter_column_type.enabled", "default value for experimental_alter_column_type session setting; "+ "enables the use of ALTER COLUMN TYPE for general conversions", @@ -377,6 +406,7 @@ ).WithPublic() var clusterStatementTimeout = settings.RegisterDurationSetting( + settings.TenantWritable, "sql.defaults.statement_timeout", "default value for the statement_timeout session setting; controls the "+ @@ -387,6 +417,7 @@ ).WithPublic() var clusterLockTimeout = settings.RegisterDurationSetting( + settings.TenantWritable, "sql.defaults.lock_timeout", "default value for the lock_timeout session setting; controls the "+ @@ -398,6 +429,7 @@ ).WithPublic() var
clusterIdleInSessionTimeout = settings.RegisterDurationSetting( + settings.TenantWritable, "sql.defaults.idle_in_session_timeout", "default value for the idle_in_session_timeout session setting; controls the "+ @@ -408,6 +440,7 @@ ).WithPublic() var clusterIdleInTransactionSessionTimeout = settings.RegisterDurationSetting( + settings.TenantWritable, "sql.defaults.idle_in_transaction_session_timeout", "default value for the idle_in_transaction_session_timeout; controls the "+ "duration a session is permitted to idle in a transaction before the "+ @@ -419,6 +452,7 @@ // TODO(rytaft): remove this once unique without index constraints are fully // supported. var experimentalUniqueWithoutIndexConstraintsMode = settings.RegisterBoolSetting( + settings.TenantWritable, "sql.defaults.experimental_enable_unique_without_index_constraints.enabled", "default value for experimental_enable_unique_without_index_constraints session setting; "+ "disables unique without index constraints by default", @@ -426,6 +460,7 @@ ).WithPublic() var experimentalUseNewSchemaChanger = settings.RegisterEnumSetting( + settings.TenantWritable, "sql.defaults.experimental_new_schema_changer.enabled", "default value for experimental_use_new_schema_changer session setting; "+ "disables new schema changer by default", @@ -439,6 +474,7 @@ ).WithPublic() var experimentalStreamReplicationEnabled = settings.RegisterBoolSetting( + settings.TenantWritable, "sql.defaults.experimental_stream_replication.enabled", "default value for experimental_stream_replication session setting; "+ "enables the ability to set up a replication stream", @@ -446,12 +482,14 @@ ).WithPublic() var stubCatalogTablesEnabledClusterValue = settings.RegisterBoolSetting( + settings.TenantWritable, `sql.defaults.stub_catalog_tables.enabled`, `default value for stub_catalog_tables session setting`, true, ).WithPublic() var experimentalComputedColumnRewrites = settings.RegisterValidatedStringSetting( + settings.TenantWritable, "sql.defaults.experimental_computed_column_rewrites", "allows rewriting computed column expressions in CREATE TABLE and ALTER TABLE statements; "+ "the format is: '(before expression) -> (after expression) [, (before expression) -> (after expression) ...]'", @@ -463,6 +501,7 @@ ) var propagateInputOrdering = settings.RegisterBoolSetting( + settings.TenantWritable, `sql.defaults.propagate_input_ordering.enabled`, `default value for the experimental propagate_input_ordering session variable`, false, @@ -471,6 +510,7 @@ // settingWorkMemBytes is a cluster setting that determines the maximum amount // of RAM that a processor can use.
var settingWorkMemBytes = settings.RegisterByteSizeSetting( + settings.TenantWritable, "sql.distsql.temp_storage.workmem", "maximum amount of memory in bytes a processor can use before falling back to temp storage", execinfra.DefaultMemoryLimit, /* 64MiB */ @@ -484,6 +524,7 @@ const ExperimentalDistSQLPlanningClusterSettingName = "sql.defaults.experimental // optimizer-driven DistSQL planning that sidesteps intermediate planNode // transition when going from opt.Expr to DistSQL processor specs. var experimentalDistSQLPlanningClusterMode = settings.RegisterEnumSetting( + settings.TenantWritable, ExperimentalDistSQLPlanningClusterSettingName, "default experimental_distsql_planning mode; enables experimental opt-driven DistSQL planning", "off", @@ -500,6 +541,7 @@ const VectorizeClusterSettingName = "sql.defaults.vectorize" // VectorizeClusterMode controls the cluster default for when automatic // vectorization is enabled. var VectorizeClusterMode = settings.RegisterEnumSetting( + settings.TenantWritable, VectorizeClusterSettingName, "default vectorize mode", "on", @@ -513,6 +555,7 @@ var VectorizeClusterMode = settings.RegisterEnumSetting( // DistSQLClusterExecMode controls the cluster default for when DistSQL is used. var DistSQLClusterExecMode = settings.RegisterEnumSetting( + settings.TenantWritable, "sql.defaults.distsql", "default distributed SQL execution mode", "auto", @@ -527,6 +570,7 @@ var DistSQLClusterExecMode = settings.RegisterEnumSetting( // SerialNormalizationMode controls how the SERIAL type is interpreted in table // definitions. var SerialNormalizationMode = settings.RegisterEnumSetting( + settings.TenantWritable, "sql.defaults.serial_normalization", "default handling of SERIAL in table definitions", "rowid", @@ -540,6 +584,7 @@ var SerialNormalizationMode = settings.RegisterEnumSetting( ).WithPublic() var disallowFullTableScans = settings.RegisterBoolSetting( + settings.TenantWritable, `sql.defaults.disallow_full_table_scans.enabled`, "setting to true rejects queries that have planned a full table scan", false, @@ -547,6 +592,7 @@ var disallowFullTableScans = settings.RegisterBoolSetting( // intervalStyle controls intervals representation. var intervalStyle = settings.RegisterEnumSetting( + settings.TenantWritable, "sql.defaults.intervalstyle", "default value for IntervalStyle session setting", strings.ToLower(duration.IntervalStyle_POSTGRES.String()), @@ -567,6 +613,7 @@ var dateStyleEnumMap = map[int64]string{ // dateStyle controls dates representation. var dateStyle = settings.RegisterEnumSetting( + settings.TenantWritable, "sql.defaults.datestyle", "default value for DateStyle session setting", pgdate.DefaultDateStyle().SQLString(), @@ -579,6 +626,7 @@ const intervalStyleEnabledClusterSetting = "sql.defaults.intervalstyle.enabled" // TODO(#sql-experience): remove session setting in v22.1 and have this // always enabled. var intervalStyleEnabled = settings.RegisterBoolSetting( + settings.TenantWritable, intervalStyleEnabledClusterSetting, "default value for intervalstyle_enabled session setting", false, @@ -590,12 +638,14 @@ const dateStyleEnabledClusterSetting = "sql.defaults.datestyle.enabled" // TODO(#sql-experience): remove session setting in v22.1 and have this // always enabled. 
var dateStyleEnabled = settings.RegisterBoolSetting( + settings.TenantWritable, dateStyleEnabledClusterSetting, "default value for datestyle_enabled session setting", false, ).WithPublic() var txnRowsWrittenLog = settings.RegisterIntSetting( + settings.TenantWritable, "sql.defaults.transaction_rows_written_log", "the threshold for the number of rows written by a SQL transaction "+ "which - once exceeded - will trigger a logging event to SQL_PERF (or "+ @@ -605,6 +655,7 @@ var txnRowsWrittenLog = settings.RegisterIntSetting( ).WithPublic() var txnRowsWrittenErr = settings.RegisterIntSetting( + settings.TenantWritable, "sql.defaults.transaction_rows_written_err", "the limit for the number of rows written by a SQL transaction which - "+ "once exceeded - will fail the transaction (or will trigger a logging "+ @@ -614,6 +665,7 @@ var txnRowsWrittenErr = settings.RegisterIntSetting( ).WithPublic() var txnRowsReadLog = settings.RegisterIntSetting( + settings.TenantWritable, "sql.defaults.transaction_rows_read_log", "the threshold for the number of rows read by a SQL transaction "+ "which - once exceeded - will trigger a logging event to SQL_PERF (or "+ @@ -623,6 +675,7 @@ var txnRowsReadLog = settings.RegisterIntSetting( ).WithPublic() var txnRowsReadErr = settings.RegisterIntSetting( + settings.TenantWritable, "sql.defaults.transaction_rows_read_err", "the limit for the number of rows read by a SQL transaction which - "+ "once exceeded - will fail the transaction (or will trigger a logging "+ @@ -634,6 +687,7 @@ var txnRowsReadErr = settings.RegisterIntSetting( // This is a float setting (rather than an int setting) because the optimizer // uses floating point for calculating row estimates. var largeFullScanRows = settings.RegisterFloatSetting( + settings.TenantWritable, "sql.defaults.large_full_scan_rows", "default value for large_full_scan_rows session setting which determines "+ "the maximum table size allowed for a full scan when disallow_full_table_scans "+ diff --git a/pkg/sql/export.go b/pkg/sql/export.go index 6a01710899a1..3212b1f91757 100644 --- a/pkg/sql/export.go +++ b/pkg/sql/export.go @@ -93,6 +93,7 @@ const parquetSuffix = "parquet" // featureExportEnabled is used to enable and disable the EXPORT feature. var featureExportEnabled = settings.RegisterBoolSetting( + settings.TenantWritable, "feature.export.enabled", "set to true to enable exports, false to disable; default is true", featureflag.FeatureFlagEnabledDefault, diff --git a/pkg/sql/flowinfra/flow_registry.go b/pkg/sql/flowinfra/flow_registry.go index 9385c8d02867..687d95978399 100644 --- a/pkg/sql/flowinfra/flow_registry.go +++ b/pkg/sql/flowinfra/flow_registry.go @@ -40,6 +40,7 @@ func IsNoInboundStreamConnectionError(err error) bool { // SettingFlowStreamTimeout is a cluster setting that sets the default flow // stream timeout. var SettingFlowStreamTimeout = settings.RegisterDurationSetting( + settings.TenantWritable, "sql.distsql.flow_stream_timeout", "amount of time incoming streams wait for a flow to be set up before erroring out", 10*time.Second, diff --git a/pkg/sql/flowinfra/flow_scheduler.go b/pkg/sql/flowinfra/flow_scheduler.go index ea2918618637..622122abbbc3 100644 --- a/pkg/sql/flowinfra/flow_scheduler.go +++ b/pkg/sql/flowinfra/flow_scheduler.go @@ -42,6 +42,7 @@ const flowDoneChanSize = 8 // TODO(yuzefovich): we probably want to remove / disable this limit completely // when we enable the admission control. 
var settingMaxRunningFlows = settings.RegisterIntSetting( + settings.TenantWritable, "sql.distsql.max_running_flows", "the value - when positive - used as is, or the value - when negative - "+ "multiplied by the number of CPUs on a node, to determine the "+ diff --git a/pkg/sql/idxusage/cluster_settings.go b/pkg/sql/idxusage/cluster_settings.go index 68343cd0e5f9..89c67668cd2b 100644 --- a/pkg/sql/idxusage/cluster_settings.go +++ b/pkg/sql/idxusage/cluster_settings.go @@ -14,5 +14,6 @@ import "github.com/cockroachdb/cockroach/pkg/settings" // Enable determines whether to collect per-index usage statistics. var Enable = settings.RegisterBoolSetting( + settings.TenantWritable, "sql.metrics.index_usage_stats.enabled", "collect per index usage statistics", true, /* defaultValue */ ).WithPublic() diff --git a/pkg/sql/instrumentation.go b/pkg/sql/instrumentation.go index 9a82af4cea2a..3d1aae5f5e6c 100644 --- a/pkg/sql/instrumentation.go +++ b/pkg/sql/instrumentation.go @@ -40,6 +40,7 @@ import ( ) var collectTxnStatsSampleRate = settings.RegisterFloatSetting( + settings.TenantWritable, "sql.txn_stats.sample_rate", "the probability that a given transaction will collect execution statistics (displayed in the DB Console)", 0.01, diff --git a/pkg/sql/join_token.go b/pkg/sql/join_token.go index 722af85696d3..d0d85445ef9b 100644 --- a/pkg/sql/join_token.go +++ b/pkg/sql/join_token.go @@ -27,6 +27,7 @@ import ( // FeatureTLSAutoJoinEnabled is used to enable and disable the TLS auto-join // feature. var FeatureTLSAutoJoinEnabled = settings.RegisterBoolSetting( + settings.TenantWritable, "feature.tls_auto_join.enabled", "set to true to enable tls auto join through join tokens, false to disable; default is false", false, diff --git a/pkg/sql/logictest/testdata/logic_test/select_index b/pkg/sql/logictest/testdata/logic_test/select_index index f909ab720a5a..d7e41430fd25 100644 --- a/pkg/sql/logictest/testdata/logic_test/select_index +++ b/pkg/sql/logictest/testdata/logic_test/select_index @@ -254,7 +254,7 @@ statement ok CREATE TABLE str (k INT PRIMARY KEY, v STRING, INDEX(v)) statement ok -INSERT INTO str VALUES (1, 'A'), (4, 'AB'), (2, 'ABC'), (5, 'ABCD'), (3, 'ABCDEZ'), (9, 'ABD') +INSERT INTO str VALUES (1, 'A'), (4, 'AB'), (2, 'ABC'), (5, 'ABCD'), (3, 'ABCDEZ'), (9, 'ABD'), (10, '\CBA'), (11, 'A%'), (12, 'CAB.*'), (13, 'CABD') query IT rowsort SELECT k, v FROM str WHERE v LIKE 'ABC%' @@ -263,11 +263,41 @@ SELECT k, v FROM str WHERE v LIKE 'ABC%' 5 ABCD 3 ABCDEZ +query IT rowsort +SELECT k, v FROM str WHERE v LIKE '\ABC%' +---- +2 ABC +5 ABCD +3 ABCDEZ + +statement error LIKE regexp compilation failed: LIKE pattern must not end with escape character +SELECT k, v FROM str WHERE v LIKE 'ABC\' + +query IT rowsort +SELECT k, v FROM str WHERE v LIKE '\\CBA%' +---- +10 \CBA + +query IT rowsort +SELECT k, v FROM str WHERE v LIKE 'A\%' +---- +11 A% + +query IT rowsort +SELECT k, v FROM str WHERE v LIKE 'CAB.*' +---- +12 CAB.* + query IT rowsort SELECT k, v FROM str WHERE v LIKE 'ABC%Z' ---- 3 ABCDEZ +query IT rowsort +SELECT k, v FROM str WHERE v LIKE '\ABCDE_' +---- +3 ABCDEZ + query IT rowsort SELECT k, v FROM str WHERE v SIMILAR TO 'ABC_*' ---- diff --git a/pkg/sql/logictest/testdata/logic_test/virtual_columns b/pkg/sql/logictest/testdata/logic_test/virtual_columns index d31ae53d9553..8cbcde0789e0 100644 --- a/pkg/sql/logictest/testdata/logic_test/virtual_columns +++ b/pkg/sql/logictest/testdata/logic_test/virtual_columns @@ -1218,3 +1218,26 @@ INSERT INTO t67528 (s) VALUES ('') statement ok CREATE INDEX ON 
t67528 (v DESC) + +# Regression test for #73372. Test backfills with partial indexes that reference +# non-null virtual columns. +subtest 73372 + +statement ok +CREATE TABLE t73372 ( + i INT NOT NULL, + s STRING NOT NULL, + v INT AS (i) VIRTUAL NOT NULL, + INDEX idx (i) WHERE v >= 0 +) + +statement ok +INSERT INTO t73372 (i, s) VALUES (0, 'foo') + +statement ok +ALTER TABLE t73372 ALTER PRIMARY KEY USING COLUMNS (s, i) + +query ITI +SELECT * FROM t73372 +---- +0 foo 0 diff --git a/pkg/sql/notice.go b/pkg/sql/notice.go index e15b88336331..3d7b084f1b80 100644 --- a/pkg/sql/notice.go +++ b/pkg/sql/notice.go @@ -22,6 +22,7 @@ import ( // NoticesEnabled is the cluster setting that allows users // to enable notices. var NoticesEnabled = settings.RegisterBoolSetting( + settings.TenantWritable, "sql.notices.enabled", "enable notices in the server/client protocol being sent", true, diff --git a/pkg/sql/opt/idxconstraint/index_constraints.go b/pkg/sql/opt/idxconstraint/index_constraints.go index 3b190164f3ef..ead9fbf6ea88 100644 --- a/pkg/sql/opt/idxconstraint/index_constraints.go +++ b/pkg/sql/opt/idxconstraint/index_constraints.go @@ -280,21 +280,28 @@ func (c *indexConstraintCtx) makeSpansForSingleColumnDatum( case opt.LikeOp: if s, ok := tree.AsDString(datum); ok { - if i := strings.IndexAny(string(s), "_%"); i >= 0 { - if i == 0 { - // Mask starts with _ or %. - c.unconstrained(offset, out) - return false + // Normalize the like pattern to a regexp. + if pattern, err := tree.LikeEscape(string(s)); err == nil { + if re, err := regexp.Compile(pattern); err == nil { + prefix, complete := re.LiteralPrefix() + if complete { + c.eqSpan(offset, tree.NewDString(prefix), out) + return true + } + if prefix == "" { + // Mask starts with _ or %. + c.unconstrained(offset, out) + return false + } + c.makeStringPrefixSpan(offset, prefix, out) + // If pattern is simply prefix + .* the span is tight. Also pattern + // will have regexp special chars escaped and so prefix needs to be + // escaped too. + if prefixEscape, err := tree.LikeEscape(prefix); err == nil { + return strings.HasSuffix(pattern, ".*") && strings.TrimSuffix(pattern, ".*") == prefixEscape + } } - c.makeStringPrefixSpan(offset, string(s[:i]), out) - // A mask like ABC% is equivalent to restricting the prefix to ABC. - // A mask like ABC%Z requires restricting the prefix, but is a stronger - // condition. - return (i == len(s)-1) && s[i] == '%' } - // No wildcard characters, this is an equality. - c.eqSpan(offset, &s, out) - return true } case opt.SimilarToOp: diff --git a/pkg/sql/opt/idxconstraint/testdata/strings b/pkg/sql/opt/idxconstraint/testdata/strings index a7b7dd70a335..0ee81a97124e 100644 --- a/pkg/sql/opt/idxconstraint/testdata/strings +++ b/pkg/sql/opt/idxconstraint/testdata/strings @@ -1,3 +1,80 @@ +index-constraints vars=(a string) index=a +a LIKE 'ABC' +---- +[/'ABC' - /'ABC'] + +# A backslash that isn't escaping anything is just removed from pattern. +index-constraints vars=(a string) index=a +a LIKE '\aABC%' +---- +[/'aABC' - /'aABD') + +# A backslash that isn't escaping anything is just removed from pattern. +index-constraints vars=(a string) index=a +a LIKE 'A\BC%' +---- +[/'ABC' - /'ABD') + +# Currently we punt on custom ESCAPE clauses. +index-constraints vars=(a string) index=a +a LIKE '\aABC%' ESCAPE '|' +---- +[ - ] +Remaining filter: like_escape(a, e'\\aABC%', '|')
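The new code path above is worth seeing in isolation: the LIKE pattern is translated to a regexp (tree.LikeEscape in the diff), compiled, and Go's regexp LiteralPrefix then recovers both the constraining prefix and whether the literal is the complete match. A runnable sketch using a simplified translator (likeToRegexp below is an illustrative stand-in for tree.LikeEscape, which also handles custom escapes and multi-byte patterns):

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// likeToRegexp translates a LIKE pattern to a regexp: % becomes .*, _
// becomes ., a backslash escapes the following character, and everything
// else is regexp-quoted.
func likeToRegexp(like string) (string, error) {
	var sb strings.Builder
	for i := 0; i < len(like); i++ {
		switch c := like[i]; c {
		case '\\':
			if i == len(like)-1 {
				return "", fmt.Errorf("LIKE pattern must not end with escape character")
			}
			i++
			sb.WriteString(regexp.QuoteMeta(string(like[i])))
		case '%':
			sb.WriteString(".*")
		case '_':
			sb.WriteString(".")
		default:
			sb.WriteString(regexp.QuoteMeta(string(c)))
		}
	}
	return sb.String(), nil
}

func main() {
	for _, like := range []string{"ABC", `A\BC%`, "ABC%Z", "%ABC", `ABC\%`} {
		pattern, err := likeToRegexp(like)
		if err != nil {
			fmt.Println(like, "->", err) // invalid pattern: no constraint
			continue
		}
		re, err := regexp.Compile(pattern)
		if err != nil {
			fmt.Println(like, "->", err)
			continue
		}
		// LiteralPrefix reports the longest literal every match must start
		// with, and whether that literal is the entire regexp (an equality).
		prefix, complete := re.LiteralPrefix()
		// The span is tight exactly when the pattern is the escaped prefix
		// followed by a single trailing ".*" (the LIKE ended in one %).
		tight := complete || (strings.HasSuffix(pattern, ".*") &&
			strings.TrimSuffix(pattern, ".*") == regexp.QuoteMeta(prefix))
		fmt.Printf("%q: prefix=%q complete=%v tight=%v\n", like, prefix, complete, tight)
	}
}

An empty prefix (pattern starts with a wildcard) leaves the scan unconstrained, a complete literal becomes an equality span, and a non-empty prefix becomes a prefix span that only drops the residual LIKE filter when tight, which matches the testdata entries in this file.

+ +# Single char wildcard requires remaining filter.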
+index-constraints vars=(a string) index=a +a LIKE '\aABC_' +---- +[/'aABC' - /'aABD') +Remaining filter: a LIKE e'\\aABC_' + +# Ending with wildcard with other wildcards present isn't tight. +index-constraints vars=(a string) index=a +a LIKE 'AB_C%' +---- +[/'AB' - /'AC') +Remaining filter: a LIKE 'AB_C%' + +# Ignore zero prefix (wildcard at beginning). +index-constraints vars=(a string) index=a +a LIKE '%ABC' +---- +(/NULL - ] +Remaining filter: a LIKE '%ABC' + +# Ignore zero prefix (wildcard at beginning). +index-constraints vars=(a string) index=a +a LIKE '_ABC' +---- +(/NULL - ] +Remaining filter: a LIKE '_ABC' + +# A backslash that is escaping a wildcard becomes equality. +index-constraints vars=(a string) index=a +a LIKE 'ABC\%' +---- +[/'ABC%' - /'ABC%'] + +# A backslash that is escaping a wildcard becomes equality. +index-constraints vars=(a string) index=a +a LIKE 'ABC\_' +---- +[/'ABC_' - /'ABC_'] + +# A backslash that is escaping a wildcard becomes equality. +index-constraints vars=(a string) index=a +a LIKE 'ABC\_Z' +---- +[/'ABC_Z' - /'ABC_Z'] + +# Invalid pattern does not generate index constraints. +index-constraints vars=(a string) index=a +a LIKE 'ABC\' +---- +(/NULL - ] +Remaining filter: a LIKE e'ABC\\' + index-constraints vars=(a string) index=a a LIKE 'ABC%' ---- @@ -60,6 +137,18 @@ a SIMILAR TO 'ABC.*Z' [/'ABC' - /'ABD') Remaining filter: a SIMILAR TO 'ABC.*Z' +index-constraints vars=(a string) index=(a) +a SIMILAR TO 'ABC%Z' +---- +[/'ABC' - /'ABD') +Remaining filter: a SIMILAR TO 'ABC%Z' + +index-constraints vars=(a string) index=(a) +a SIMILAR TO 'ABC_Z' +---- +[/'ABC' - /'ABD') +Remaining filter: a SIMILAR TO 'ABC_Z' + index-constraints vars=(a string) index=(a) a SIMILAR TO 'ABC' ---- diff --git a/pkg/sql/opt/memo/constraint_builder.go b/pkg/sql/opt/memo/constraint_builder.go index 09cf4525c2ce..8cf1afbc1dd9 100644 --- a/pkg/sql/opt/memo/constraint_builder.go +++ b/pkg/sql/opt/memo/constraint_builder.go @@ -176,20 +176,26 @@ func (cb *constraintsBuilder) buildSingleColumnConstraintConst( case opt.LikeOp: if s, ok := tree.AsDString(datum); ok { - if i := strings.IndexAny(string(s), "_%"); i >= 0 { - if i == 0 { - // Mask starts with _ or %. - return unconstrained, false + // Normalize the like pattern to a regexp. + if pattern, err := tree.LikeEscape(string(s)); err == nil { + if re, err := regexp.Compile(pattern); err == nil { + prefix, complete := re.LiteralPrefix() + if complete { + return cb.eqSpan(col, tree.NewDString(prefix)), true + } + if prefix == "" { + // Mask starts with _ or %. + return unconstrained, false + } + c := cb.makeStringPrefixSpan(col, prefix) + // If pattern is simply prefix + .* the span is tight. Also pattern + // will have regexp special chars escaped and so prefix needs to be + // escaped too. The original string may have superfluous escapes. + if prefixEscape, err := tree.LikeEscape(prefix); err == nil { + return c, strings.HasSuffix(pattern, ".*") && strings.TrimSuffix(pattern, ".*") == prefixEscape + } } - c := cb.makeStringPrefixSpan(col, string(s[:i])) - // A mask like ABC% is equivalent to restricting the prefix to ABC. - // A mask like ABC%Z requires restricting the prefix, but is a stronger - // condition. - tight := (i == len(s)-1) && s[i] == '%' - return c, tight } - // No wildcard characters, this is an equality.
- return cb.eqSpan(col, &s), true } case opt.SimilarToOp: diff --git a/pkg/sql/opt/memo/testdata/logprops/constraints b/pkg/sql/opt/memo/testdata/logprops/constraints index 4da04c26557e..7eccb8a324e7 100644 --- a/pkg/sql/opt/memo/testdata/logprops/constraints +++ b/pkg/sql/opt/memo/testdata/logprops/constraints @@ -1053,6 +1053,47 @@ select ├── variable: v:3 [type=string] └── const: 'ABC%' [type=string] +opt +SELECT * FROM kuv WHERE v LIKE '\ABC%' +---- +select + ├── columns: k:1(int!null) u:2(float) v:3(string!null) + ├── key: (1) + ├── fd: (1)-->(2,3) + ├── prune: (1,2) + ├── interesting orderings: (+1) + ├── scan kuv + │ ├── columns: k:1(int!null) u:2(float) v:3(string) + │ ├── key: (1) + │ ├── fd: (1)-->(2,3) + │ ├── prune: (1-3) + │ └── interesting orderings: (+1) + └── filters + └── like [type=bool, outer=(3), constraints=(/3: [/'ABC' - /'ABD'); tight)] + ├── variable: v:3 [type=string] + └── const: e'\\ABC%' [type=string] + +# Like doesn't support RE syntax. +opt +SELECT * FROM kuv WHERE v LIKE 'ABC.*' +---- +select + ├── columns: k:1(int!null) u:2(float) v:3(string!null) + ├── key: (1) + ├── fd: ()-->(3), (1)-->(2) + ├── prune: (1,2) + ├── interesting orderings: (+1 opt(3)) + ├── scan kuv + │ ├── columns: k:1(int!null) u:2(float) v:3(string) + │ ├── key: (1) + │ ├── fd: (1)-->(2,3) + │ ├── prune: (1-3) + │ └── interesting orderings: (+1) + └── filters + └── like [type=bool, outer=(3), constraints=(/3: [/'ABC.*' - /'ABC.*']; tight), fd=()-->(3)] + ├── variable: v:3 [type=string] + └── const: 'ABC.*' [type=string] + opt SELECT * FROM kuv WHERE v LIKE 'ABC_' ---- diff --git a/pkg/sql/opt/optbuilder/mutation_builder_unique.go b/pkg/sql/opt/optbuilder/mutation_builder_unique.go index c6625622a2a2..97d7afb10067 100644 --- a/pkg/sql/opt/optbuilder/mutation_builder_unique.go +++ b/pkg/sql/opt/optbuilder/mutation_builder_unique.go @@ -25,6 +25,7 @@ import ( // UniquenessChecksForGenRandomUUIDClusterMode controls the cluster setting for // enabling uniqueness checks for UUID columns set to gen_random_uuid(). var UniquenessChecksForGenRandomUUIDClusterMode = settings.RegisterBoolSetting( + settings.TenantWritable, "sql.optimizer.uniqueness_checks_for_gen_random_uuid.enabled", "if enabled, uniqueness checks may be planned for mutations of UUID columns updated with"+ " gen_random_uuid(); otherwise, uniqueness is assumed due to near-zero collision probability", diff --git a/pkg/sql/opt/optbuilder/util.go b/pkg/sql/opt/optbuilder/util.go index a1e6c0fa9756..c4317c5ad207 100644 --- a/pkg/sql/opt/optbuilder/util.go +++ b/pkg/sql/opt/optbuilder/util.go @@ -28,6 +28,7 @@ import ( ) var multipleModificationsOfTableEnabled = settings.RegisterBoolSetting( + settings.TenantWritable, "sql.multiple_modifications_of_table.enabled", "if true, allow statements containing multiple INSERT ON CONFLICT, UPSERT, UPDATE, or DELETE "+ "subqueries modifying the same table, at the risk of data corruption if the same row is "+ diff --git a/pkg/sql/pgwire/hba_conf.go b/pkg/sql/pgwire/hba_conf.go index 082c4704ccd1..edcc8952ec27 100644 --- a/pkg/sql/pgwire/hba_conf.go +++ b/pkg/sql/pgwire/hba_conf.go @@ -72,6 +72,7 @@ const serverHBAConfSetting = "server.host_based_authentication.configuration" // configuration. 
var connAuthConf = func() *settings.StringSetting { s := settings.RegisterValidatedStringSetting( + settings.TenantWritable, serverHBAConfSetting, "host-based authentication configuration to use during connection authentication", "", diff --git a/pkg/sql/pgwire/ident_map_conf.go b/pkg/sql/pgwire/ident_map_conf.go index b264def06f1a..b96d754aba12 100644 --- a/pkg/sql/pgwire/ident_map_conf.go +++ b/pkg/sql/pgwire/ident_map_conf.go @@ -26,6 +26,7 @@ const serverIdentityMapSetting = "server.identity_map.configuration" var connIdentityMapConf = func() *settings.StringSetting { s := settings.RegisterValidatedStringSetting( + settings.TenantWritable, serverIdentityMapSetting, "system-identity to database-username mappings", "", diff --git a/pkg/sql/pgwire/pgwirebase/encoding.go b/pkg/sql/pgwire/pgwirebase/encoding.go index 1a16f3f6765b..c55e4e4f08ca 100644 --- a/pkg/sql/pgwire/pgwirebase/encoding.go +++ b/pkg/sql/pgwire/pgwirebase/encoding.go @@ -52,6 +52,7 @@ const readBufferMaxMessageSizeClusterSettingName = "sql.conn.max_read_buffer_mes // ReadBufferMaxMessageSizeClusterSetting is the cluster setting for configuring // ReadBuffer default message sizes. var ReadBufferMaxMessageSizeClusterSetting = settings.RegisterByteSizeSetting( + settings.TenantWritable, readBufferMaxMessageSizeClusterSettingName, "maximum buffer size to allow for ingesting sql statements. Connections must be restarted for this to take effect.", defaultMaxReadBufferMessageSize, diff --git a/pkg/sql/pgwire/server.go b/pkg/sql/pgwire/server.go index cd49bcd5fafc..991e6ac61bce 100644 --- a/pkg/sql/pgwire/server.go +++ b/pkg/sql/pgwire/server.go @@ -59,6 +59,7 @@ import ( // The "results_buffer_size" connection parameter can be used to override this // default for an individual connection. var connResultsBufferSize = settings.RegisterByteSizeSetting( + settings.TenantWritable, "sql.defaults.results_buffer.size", "default size of the buffer that accumulates results for a statement or a batch "+ "of statements before they are sent to the client. This can be overridden on "+ @@ -73,11 +74,13 @@ var connResultsBufferSize = settings.RegisterByteSizeSetting( ).WithPublic() var logConnAuth = settings.RegisterBoolSetting( + settings.TenantWritable, sql.ConnAuditingClusterSettingName, "if set, log SQL client connect and disconnect events (note: may hinder performance on loaded nodes)", false).WithPublic() var logSessionAuth = settings.RegisterBoolSetting( + settings.TenantWritable, sql.AuthAuditingClusterSettingName, "if set, log SQL session login/disconnection events (note: may hinder performance on loaded nodes)", false).WithPublic() diff --git a/pkg/sql/plan_opt.go b/pkg/sql/plan_opt.go index ad6b680f7eb8..f4f92af94671 100644 --- a/pkg/sql/plan_opt.go +++ b/pkg/sql/plan_opt.go @@ -37,6 +37,7 @@ import ( ) var queryCacheEnabled = settings.RegisterBoolSetting( + settings.TenantWritable, "sql.query_cache.enabled", "enable the query cache", true, ) diff --git a/pkg/sql/revert.go b/pkg/sql/revert.go index 97a8e701fdb0..4c0122305585 100644 --- a/pkg/sql/revert.go +++ b/pkg/sql/revert.go @@ -32,6 +32,7 @@ const RevertTableDefaultBatchSize = 500000 // useTBIForRevertRange is a cluster setting that controls if the time-bound // iterator optimization is used when processing a revert range request. 
var useTBIForRevertRange = settings.RegisterBoolSetting( + settings.TenantWritable, "kv.bulk_io_write.revert_range_time_bound_iterator.enabled", "use the time-bound iterator optimization when processing a revert range request", true, diff --git a/pkg/sql/row/helper.go b/pkg/sql/row/helper.go index 3e840171a683..a80091997709 100644 --- a/pkg/sql/row/helper.go +++ b/pkg/sql/row/helper.go @@ -43,6 +43,7 @@ const ( ) var maxRowSizeLog = settings.RegisterByteSizeSetting( + settings.TenantWritable, "sql.guardrails.max_row_size_log", "maximum size of row (or column family if multiple column families are in use) that SQL can "+ "write to the database, above which an event is logged to SQL_PERF (or SQL_INTERNAL_PERF "+ @@ -65,6 +66,7 @@ var maxRowSizeLog = settings.RegisterByteSizeSetting( ).WithPublic() var maxRowSizeErr = settings.RegisterByteSizeSetting( + settings.TenantWritable, "sql.guardrails.max_row_size_err", "maximum size of row (or column family if multiple column families are in use) that SQL can "+ "write to the database, above which an error is returned; use 0 to disable", diff --git a/pkg/sql/rowexec/columnbackfiller.go b/pkg/sql/rowexec/columnbackfiller.go index c1c105b4c285..659cc364cf17 100644 --- a/pkg/sql/rowexec/columnbackfiller.go +++ b/pkg/sql/rowexec/columnbackfiller.go @@ -47,6 +47,7 @@ var _ chunkBackfiller = &columnBackfiller{} // Each function retains a reference to its corresponding TxnCoordSender, so we // need to be careful not to accumulate an unbounded number of these functions. var backfillerMaxCommitWaitFns = settings.RegisterIntSetting( + settings.TenantWritable, "schemachanger.backfiller.max_commit_wait_fns", "the maximum number of commit-wait functions that the columnBackfiller will accumulate before consuming them to reclaim memory", 128, diff --git a/pkg/sql/rowexec/indexbackfiller.go b/pkg/sql/rowexec/indexbackfiller.go index ea41641cc9a2..9d3619ea586e 100644 --- a/pkg/sql/rowexec/indexbackfiller.go +++ b/pkg/sql/rowexec/indexbackfiller.go @@ -58,18 +58,22 @@ type indexBackfiller struct { var _ execinfra.Processor = &indexBackfiller{} var backfillerBufferSize = settings.RegisterByteSizeSetting( + settings.TenantWritable, "schemachanger.backfiller.buffer_size", "the initial size of the BulkAdder buffer handling index backfills", 32<<20, ) var backfillerMaxBufferSize = settings.RegisterByteSizeSetting( + settings.TenantWritable, "schemachanger.backfiller.max_buffer_size", "the maximum size of the BulkAdder buffer handling index backfills", 512<<20, ) var backfillerBufferIncrementSize = settings.RegisterByteSizeSetting( + settings.TenantWritable, "schemachanger.backfiller.buffer_increment", "the size by which the BulkAdder attempts to grow its buffer before flushing", 32<<20, ) var backillerSSTSize = settings.RegisterByteSizeSetting( + settings.TenantWritable, "schemachanger.backfiller.max_sst_size", "target size for ingested files during backfills", 16<<20, ) diff --git a/pkg/sql/rowexec/joinreader.go b/pkg/sql/rowexec/joinreader.go index f5a9f7a58602..2a37721d24a3 100644 --- a/pkg/sql/rowexec/joinreader.go +++ b/pkg/sql/rowexec/joinreader.go @@ -208,6 +208,7 @@ const joinReaderProcName = "join reader" // ParallelizeMultiKeyLookupJoinsEnabled determines whether the joinReader // parallelizes KV batches in all cases. var ParallelizeMultiKeyLookupJoinsEnabled = settings.RegisterBoolSetting( + settings.TenantWritable, "sql.distsql.parallelize_multi_key_lookup_joins.enabled", "determines whether KV batches are executed in parallel for lookup joins in all cases. 
"+ "Enabling this will increase the speed of lookup joins when each input row might get "+ diff --git a/pkg/sql/rowexec/joinreader_strategies.go b/pkg/sql/rowexec/joinreader_strategies.go index 2f0609716473..99e46a410bba 100644 --- a/pkg/sql/rowexec/joinreader_strategies.go +++ b/pkg/sql/rowexec/joinreader_strategies.go @@ -509,6 +509,7 @@ const joinReaderOrderingStrategyBatchSizeDefault = 10 << 10 /* 10 KiB */ // JoinReaderOrderingStrategyBatchSize determines the size of input batches used // to construct a single lookup KV batch by joinReaderOrderingStrategy. var JoinReaderOrderingStrategyBatchSize = settings.RegisterByteSizeSetting( + settings.TenantWritable, "sql.distsql.join_reader_ordering_strategy.batch_size", "size limit on the input rows to construct a single lookup KV batch", joinReaderOrderingStrategyBatchSizeDefault, diff --git a/pkg/sql/schema_change_cluster_setting.go b/pkg/sql/schema_change_cluster_setting.go index 9cecf8220a66..eca033774083 100644 --- a/pkg/sql/schema_change_cluster_setting.go +++ b/pkg/sql/schema_change_cluster_setting.go @@ -22,6 +22,7 @@ import ( // any features that require schema changes. Documentation for which features // are covered TBD. var featureSchemaChangeEnabled = settings.RegisterBoolSetting( + settings.TenantWritable, "feature.schema_change.enabled", "set to true to enable schema changes, false to disable; default is true", featureflag.FeatureFlagEnabledDefault, diff --git a/pkg/sql/sem/tree/eval.go b/pkg/sql/sem/tree/eval.go index 6864829bfd7e..a1a9b88f8a06 100644 --- a/pkg/sql/sem/tree/eval.go +++ b/pkg/sql/sem/tree/eval.go @@ -2637,6 +2637,7 @@ var CmpOps = cmpOpFixups(map[ComparisonOperatorSymbol]cmpOpOverload{ const experimentalBox2DClusterSettingName = "sql.spatial.experimental_box2d_comparison_operators.enabled" var experimentalBox2DClusterSetting = settings.RegisterBoolSetting( + settings.TenantWritable, experimentalBox2DClusterSettingName, "enables the use of certain experimental box2d comparison operators", false, @@ -4981,6 +4982,13 @@ type likeKey struct { escape rune } +// LikeEscape converts a like pattern to a regexp pattern. +func LikeEscape(pattern string) (string, error) { + key := likeKey{s: pattern, caseInsensitive: false, escape: '\\'} + re, err := key.patternNoAnchor() + return re, err +} + // unescapePattern unescapes a pattern for a given escape token. // It handles escaped escape tokens properly by maintaining them as the escape // token in the return string. @@ -5340,11 +5348,7 @@ func calculateLengthAfterReplacingCustomEscape(s string, escape rune) (bool, int return changed, retLen, nil } -// Pattern implements the RegexpCacheKey interface. -// The strategy for handling custom escape character -// is to convert all unescaped escape character into '\'. -// k.escape can either be empty or a single character. -func (k likeKey) Pattern() (string, error) { +func (k likeKey) patternNoAnchor() (string, error) { // QuoteMeta escapes all regexp metacharacters (`\.+*?()|[]{}^$`) with a `\`. pattern := regexp.QuoteMeta(k.s) var err error @@ -5421,6 +5425,18 @@ func (k likeKey) Pattern() (string, error) { } } + return pattern, nil +} + +// Pattern implements the RegexpCacheKey interface. +// The strategy for handling custom escape character +// is to convert all unescaped escape character into '\'. +// k.escape can either be empty or a single character. 
+func (k likeKey) Pattern() (string, error) { + pattern, err := k.patternNoAnchor() + if err != nil { + return "", err + } return anchorPattern(pattern, k.caseInsensitive), nil } diff --git a/pkg/sql/serial.go b/pkg/sql/serial.go index 7050e7cf66e1..cdef293ef6fc 100644 --- a/pkg/sql/serial.go +++ b/pkg/sql/serial.go @@ -50,6 +50,7 @@ var virtualSequenceOpts = tree.SequenceOptions{ // cachedSequencesCacheSize is the default cache size used when // SessionNormalizationMode is SerialUsesCachedSQLSequences. var cachedSequencesCacheSizeSetting = settings.RegisterIntSetting( + settings.TenantWritable, "sql.defaults.serial_sequences_cache_size", "the default cache size when the session's serial normalization mode is set to cached sequences. "+ "A cache size of 1 means no caching. Any cache size less than 1 is invalid.", diff --git a/pkg/sql/sessioninit/cache.go b/pkg/sql/sessioninit/cache.go index 24fc5c0f6f95..202bab723a1b 100644 --- a/pkg/sql/sessioninit/cache.go +++ b/pkg/sql/sessioninit/cache.go @@ -35,6 +35,7 @@ var CacheEnabledSettingName = "server.authentication_cache.enabled" // CacheEnabled is a cluster setting that determines if the // sessioninit.Cache and associated logic is enabled. var CacheEnabled = settings.RegisterBoolSetting( + settings.TenantWritable, CacheEnabledSettingName, "enables a cache used during authentication to avoid lookups to system tables "+ "when retrieving per-user authentication-related information", diff --git a/pkg/sql/set_cluster_setting.go b/pkg/sql/set_cluster_setting.go index 7dab08e45b3c..c63851e483d6 100644 --- a/pkg/sql/set_cluster_setting.go +++ b/pkg/sql/set_cluster_setting.go @@ -96,7 +96,7 @@ func (p *planner) SetClusterSetting( return nil, errors.AssertionFailedf("expected writable setting, got %T", v) } - if setting.SystemOnly() && !p.execCfg.Codec.ForSystemTenant() { + if setting.Class() == settings.SystemOnly && !p.execCfg.Codec.ForSystemTenant() { return nil, pgerror.Newf(pgcode.InsufficientPrivilege, "setting %s is only settable in the system tenant", name) } diff --git a/pkg/sql/sqlliveness/slinstance/slinstance.go b/pkg/sql/sqlliveness/slinstance/slinstance.go index e7c059f652a5..51ef58ebb16f 100644 --- a/pkg/sql/sqlliveness/slinstance/slinstance.go +++ b/pkg/sql/sqlliveness/slinstance/slinstance.go @@ -33,6 +33,7 @@ import ( var ( // DefaultTTL specifies the time to expiration when a session is created. DefaultTTL = settings.RegisterDurationSetting( + settings.TenantWritable, "server.sqlliveness.ttl", "default sqlliveness session ttl", 40*time.Second, @@ -40,6 +41,7 @@ var ( ) // DefaultHeartBeat specifies the period between attempts to extend a session. DefaultHeartBeat = settings.RegisterDurationSetting( + settings.TenantWritable, "server.sqlliveness.heartbeat", "duration heart beats to push session expiration further out in time", 5*time.Second, diff --git a/pkg/sql/sqlliveness/slstorage/slstorage.go b/pkg/sql/sqlliveness/slstorage/slstorage.go index f574c1df1d0e..83593b8ae904 100644 --- a/pkg/sql/sqlliveness/slstorage/slstorage.go +++ b/pkg/sql/sqlliveness/slstorage/slstorage.go @@ -40,6 +40,7 @@ import ( // GCInterval specifies duration between attempts to delete extant // sessions that have expired.
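(Illustrative aside on the eval.go hunk above: Pattern() is now a thin wrapper that anchors whatever patternNoAnchor() produces, and the new exported LikeEscape exposes the unanchored conversion. A hedged usage sketch; likePatternToRegexp is a hypothetical helper, with imports of regexp and pkg/sql/sem/tree assumed:)

// likePatternToRegexp compiles a SQL LIKE pattern by way of the new
// tree.LikeEscape helper. LikeEscape uses the default '\' escape and
// returns the regexp body without the anchoring that likeKey.Pattern
// adds, so the caller decides whether to anchor.
func likePatternToRegexp(like string) (*regexp.Regexp, error) {
	body, err := tree.LikeEscape(like)
	if err != nil {
		return nil, err
	}
	return regexp.Compile(body)
}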
var GCInterval = settings.RegisterDurationSetting( + settings.TenantWritable, "server.sqlliveness.gc_interval", "duration between attempts to delete extant sessions that have expired", 20*time.Second, @@ -51,6 +52,7 @@ var GCInterval = settings.RegisterDurationSetting( // // [(1-GCJitter) * GCInterval, (1+GCJitter) * GCInterval] var GCJitter = settings.RegisterFloatSetting( + settings.TenantWritable, "server.sqlliveness.gc_jitter", "jitter fraction on the duration between attempts to delete extant sessions that have expired", .15, @@ -69,6 +71,7 @@ var GCJitter = settings.RegisterFloatSetting( // increasing the cache size dynamically. The entries are just bytes each so // this should not be a big deal. var CacheSize = settings.RegisterIntSetting( + settings.TenantWritable, "server.sqlliveness.storage_session_cache_size", "number of session entries to store in the LRU", 1024) diff --git a/pkg/sql/sqlstats/cluster_settings.go b/pkg/sql/sqlstats/cluster_settings.go index 8655dd0e68b6..c254a52e02ae 100644 --- a/pkg/sql/sqlstats/cluster_settings.go +++ b/pkg/sql/sqlstats/cluster_settings.go @@ -18,6 +18,7 @@ import ( // StmtStatsEnable determines whether to collect per-statement statistics. var StmtStatsEnable = settings.RegisterBoolSetting( + settings.TenantWritable, "sql.metrics.statement_details.enabled", "collect per-statement query statistics", true, ).WithPublic() @@ -25,6 +26,7 @@ var StmtStatsEnable = settings.RegisterBoolSetting( // transactions statistics for a single transaction. This defaults to 1000, and // currently is non-configurable (hidden setting). var TxnStatsNumStmtFingerprintIDsToRecord = settings.RegisterIntSetting( + settings.TenantWritable, "sql.metrics.transaction_details.max_statement_ids", "max number of statement fingerprint IDs to store for transaction statistics", 1000, @@ -34,12 +36,14 @@ var TxnStatsNumStmtFingerprintIDsToRecord = settings.RegisterIntSetting( // TxnStatsEnable determines whether to collect per-application transaction // statistics. var TxnStatsEnable = settings.RegisterBoolSetting( + settings.TenantWritable, "sql.metrics.transaction_details.enabled", "collect per-application transaction statistics", true, ).WithPublic() // StatsCollectionLatencyThreshold specifies the minimum amount of time // consumed by a SQL statement before it is collected for statistics reporting. var StatsCollectionLatencyThreshold = settings.RegisterDurationSetting( + settings.TenantWritable, "sql.metrics.statement_details.threshold", "minimum execution time to cause statement statistics to be collected. "+ "If configured, no transaction stats are collected.", @@ -49,6 +53,7 @@ var StatsCollectionLatencyThreshold = settings.RegisterDurationSetting( // DumpStmtStatsToLogBeforeReset specifies whether we dump the statements // statistics to logs before being reset. var DumpStmtStatsToLogBeforeReset = settings.RegisterBoolSetting( + settings.TenantWritable, "sql.metrics.statement_details.dump_to_logs", "dump collected statement statistics to node logs when periodically cleared", false, @@ -57,6 +62,7 @@ var DumpStmtStatsToLogBeforeReset = settings.RegisterBoolSetting( // SampleLogicalPlans specifies whether we periodically sample the logical plan // for each fingerprint. 
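(Illustrative aside, not part of the patch: the GCJitter comment above defines the window [(1-GCJitter) * GCInterval, (1+GCJitter) * GCInterval]. A sketch of how a caller inside slstorage might draw from that window; jitteredGCWait is hypothetical and the usual *cluster.Settings plumbing is assumed:)

// jitteredGCWait draws a wait duration uniformly from
// [(1-jitter)*interval, (1+jitter)*interval].
func jitteredGCWait(st *cluster.Settings, rng *rand.Rand) time.Duration {
	interval := GCInterval.Get(&st.SV)
	jitter := GCJitter.Get(&st.SV)
	frac := 1 - jitter + 2*jitter*rng.Float64()
	return time.Duration(float64(interval) * frac)
}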
var SampleLogicalPlans = settings.RegisterBoolSetting( + settings.TenantWritable, "sql.metrics.statement_details.plan_collection.enabled", "periodically save a logical plan for each fingerprint", true, @@ -65,6 +71,7 @@ var SampleLogicalPlans = settings.RegisterBoolSetting( // LogicalPlanCollectionPeriod specifies the interval between collections of // logical plans for each fingerprint. var LogicalPlanCollectionPeriod = settings.RegisterDurationSetting( + settings.TenantWritable, "sql.metrics.statement_details.plan_collection.period", "the time until a new logical plan is collected", 5*time.Minute, @@ -74,6 +81,7 @@ var LogicalPlanCollectionPeriod = settings.RegisterDurationSetting( // MaxMemSQLStatsStmtFingerprints specifies the maximum of unique statement // fingerprints we store in memory. var MaxMemSQLStatsStmtFingerprints = settings.RegisterIntSetting( + settings.TenantWritable, "sql.metrics.max_mem_stmt_fingerprints", "the maximum number of statement fingerprints stored in memory", 100000, @@ -82,6 +90,7 @@ var MaxMemSQLStatsStmtFingerprints = settings.RegisterIntSetting( // MaxMemSQLStatsTxnFingerprints specifies the maximum of unique transaction // fingerprints we store in memory. var MaxMemSQLStatsTxnFingerprints = settings.RegisterIntSetting( + settings.TenantWritable, "sql.metrics.max_mem_txn_fingerprints", "the maximum number of transaction fingerprints stored in memory", 100000, @@ -90,6 +99,7 @@ var MaxMemSQLStatsTxnFingerprints = settings.RegisterIntSetting( // MaxMemReportedSQLStatsStmtFingerprints specifies the maximum of unique statement // fingerprints we store in memory. var MaxMemReportedSQLStatsStmtFingerprints = settings.RegisterIntSetting( + settings.TenantWritable, "sql.metrics.max_mem_reported_stmt_fingerprints", "the maximum number of reported statement fingerprints stored in memory", 100000, @@ -98,6 +108,7 @@ var MaxMemReportedSQLStatsStmtFingerprints = settings.RegisterIntSetting( // MaxMemReportedSQLStatsTxnFingerprints specifies the maximum of unique transaction // fingerprints we store in memory. var MaxMemReportedSQLStatsTxnFingerprints = settings.RegisterIntSetting( + settings.TenantWritable, "sql.metrics.max_mem_reported_txn_fingerprints", "the maximum number of reported transaction fingerprints stored in memory", 100000, @@ -132,6 +143,7 @@ var MaxMemReportedSQLStatsTxnFingerprints = settings.RegisterIntSetting( // The total amount of memory consumed will still be constrained by the // top-level memory monitor created for SQL Stats. var MaxSQLStatsStmtFingerprintsPerExplicitTxn = settings.RegisterIntSetting( + settings.TenantWritable, "sql.metrics.max_stmt_fingerprints_per_explicit_txn", "the maximum number of statement fingerprints stored per explicit transaction", 2000, @@ -140,6 +152,7 @@ var MaxSQLStatsStmtFingerprintsPerExplicitTxn = settings.RegisterIntSetting( // MaxSQLStatReset is the cluster setting that controls at what interval SQL // statement statistics must be flushed within. var MaxSQLStatReset = settings.RegisterDurationSetting( + settings.TenantWritable, "diagnostics.forced_sql_stat_reset.interval", "interval after which the reported SQL Stats are reset even "+ "if not collected by telemetry reporter. 
It has a max value of 24H.", diff --git a/pkg/sql/sqlstats/persistedsqlstats/cluster_settings.go b/pkg/sql/sqlstats/persistedsqlstats/cluster_settings.go index 9620165593a2..044484ad2384 100644 --- a/pkg/sql/sqlstats/persistedsqlstats/cluster_settings.go +++ b/pkg/sql/sqlstats/persistedsqlstats/cluster_settings.go @@ -21,6 +21,7 @@ import ( // SQLStatsFlushInterval is the cluster setting that controls how often the SQL // stats are flushed to system table. var SQLStatsFlushInterval = settings.RegisterDurationSetting( + settings.TenantWritable, "sql.stats.flush.interval", "the interval at which SQL execution statistics are flushed to disk", time.Hour, @@ -30,6 +31,7 @@ var SQLStatsFlushInterval = settings.RegisterDurationSetting( // SQLStatsFlushEnabled is the cluster setting that controls if the sqlstats // subsystem persists the statistics into system table. var SQLStatsFlushEnabled = settings.RegisterBoolSetting( + settings.TenantWritable, "sql.stats.flush.enabled", "if set, SQL execution statistics are periodically flushed to disk", true, /* defaultValue */ @@ -41,6 +43,7 @@ var SQLStatsFlushEnabled = settings.RegisterBoolSetting( // [(1 - SQLStatsFlushJitter) * SQLStatsFlushInterval), // (1 + SQLStatsFlushJitter) * SQLStatsFlushInterval)] var SQLStatsFlushJitter = settings.RegisterFloatSetting( + settings.TenantWritable, "sql.stats.flush.jitter", "jitter fraction on the duration between sql stats flushes", 0.15, @@ -55,6 +58,7 @@ var SQLStatsFlushJitter = settings.RegisterFloatSetting( // SQLStatsMaxPersistedRows specifies maximum number of rows that will be // retained in system.statement_statistics and system.transaction_statistics. var SQLStatsMaxPersistedRows = settings.RegisterIntSetting( + settings.TenantWritable, "sql.stats.persisted_rows.max", "maximum number of rows of statement and transaction"+ " statistics that will be persisted in the system tables", @@ -64,6 +68,7 @@ var SQLStatsMaxPersistedRows = settings.RegisterIntSetting( // SQLStatsCleanupRecurrence is the cron-tab string specifying the recurrence // for SQL Stats cleanup job. var SQLStatsCleanupRecurrence = settings.RegisterValidatedStringSetting( + settings.TenantWritable, "sql.stats.cleanup.recurrence", "cron-tab recurrence for SQL Stats cleanup job", "@hourly", /* defaultValue */ diff --git a/pkg/sql/stats/automatic_stats.go b/pkg/sql/stats/automatic_stats.go index 4704e6502ba5..94bbc9940c98 100644 --- a/pkg/sql/stats/automatic_stats.go +++ b/pkg/sql/stats/automatic_stats.go @@ -41,6 +41,7 @@ const AutoStatsClusterSettingName = "sql.stats.automatic_collection.enabled" // AutomaticStatisticsClusterMode controls the cluster setting for enabling // automatic table statistics collection. var AutomaticStatisticsClusterMode = settings.RegisterBoolSetting( + settings.TenantWritable, AutoStatsClusterSettingName, "automatic statistics collection mode", true, @@ -49,6 +50,7 @@ var AutomaticStatisticsClusterMode = settings.RegisterBoolSetting( // MultiColumnStatisticsClusterMode controls the cluster setting for enabling // automatic collection of multi-column statistics. var MultiColumnStatisticsClusterMode = settings.RegisterBoolSetting( + settings.TenantWritable, "sql.stats.multi_column_collection.enabled", "multi-column statistics collection mode", true, @@ -59,6 +61,7 @@ var MultiColumnStatisticsClusterMode = settings.RegisterBoolSetting( // statistics (in high load scenarios). This value can be tuned to trade off // the runtime vs performance impact of automatic stats. 
var AutomaticStatisticsMaxIdleTime = settings.RegisterFloatSetting( + settings.TenantWritable, "sql.stats.automatic_collection.max_fraction_idle", "maximum fraction of time that automatic statistics sampler processors are idle", 0.9, @@ -77,6 +80,7 @@ var AutomaticStatisticsMaxIdleTime = settings.RegisterFloatSetting( // AutomaticStatisticsMinStaleRows. var AutomaticStatisticsFractionStaleRows = func() *settings.FloatSetting { s := settings.RegisterFloatSetting( + settings.TenantWritable, "sql.stats.automatic_collection.fraction_stale_rows", "target fraction of stale rows per table that will trigger a statistics refresh", 0.2, @@ -91,6 +95,7 @@ var AutomaticStatisticsFractionStaleRows = func() *settings.FloatSetting { // addition to the fraction AutomaticStatisticsFractionStaleRows. var AutomaticStatisticsMinStaleRows = func() *settings.IntSetting { s := settings.RegisterIntSetting( + settings.TenantWritable, "sql.stats.automatic_collection.min_stale_rows", "target minimum number of stale rows per table that will trigger a statistics refresh", 500, diff --git a/pkg/sql/stats/histogram.go b/pkg/sql/stats/histogram.go index 985d045b0442..18c543ac35cd 100644 --- a/pkg/sql/stats/histogram.go +++ b/pkg/sql/stats/histogram.go @@ -26,6 +26,7 @@ import ( // HistogramClusterMode controls the cluster setting for enabling // histogram collection. var HistogramClusterMode = settings.RegisterBoolSetting( + settings.TenantWritable, "sql.stats.histogram_collection.enabled", "histogram collection mode", true, diff --git a/pkg/sql/stmtdiagnostics/statement_diagnostics.go b/pkg/sql/stmtdiagnostics/statement_diagnostics.go index d172f2c17de1..d85eb1f4e0c6 100644 --- a/pkg/sql/stmtdiagnostics/statement_diagnostics.go +++ b/pkg/sql/stmtdiagnostics/statement_diagnostics.go @@ -36,11 +36,13 @@ import ( ) var pollingInterval = settings.RegisterDurationSetting( + settings.TenantWritable, "sql.stmt_diagnostics.poll_interval", "rate at which the stmtdiagnostics.Registry polls for requests, set to zero to disable", 10*time.Second) var bundleChunkSize = settings.RegisterByteSizeSetting( + settings.TenantWritable, "sql.stmt_diagnostics.bundle_chunk_size", "chunk size for statement diagnostic bundles", 1024*1024, diff --git a/pkg/sql/tablewriter.go b/pkg/sql/tablewriter.go index f76388ee2392..a6b2e1b5e6ef 100644 --- a/pkg/sql/tablewriter.go +++ b/pkg/sql/tablewriter.go @@ -145,6 +145,7 @@ type tableWriterBase struct { } var maxBatchBytes = settings.RegisterByteSizeSetting( + settings.TenantWritable, "sql.mutations.mutation_batch_byte_size", "byte size - in key and value lengths -- for mutation batches", 4<<20, diff --git a/pkg/sql/telemetry_logging.go b/pkg/sql/telemetry_logging.go index 87493c8cc919..a7bdf2178cf2 100644 --- a/pkg/sql/telemetry_logging.go +++ b/pkg/sql/telemetry_logging.go @@ -24,6 +24,7 @@ import ( const defaultMaxEventFrequency = 10 var telemetryMaxEventFrequency = settings.RegisterIntSetting( + settings.TenantWritable, "sql.telemetry.query_sampling.max_event_frequency", "the max event frequency at which we sample queries for telemetry", defaultMaxEventFrequency, diff --git a/pkg/sql/temporary_schema.go b/pkg/sql/temporary_schema.go index ef411ab6df59..21a4dcfa5aa1 100644 --- a/pkg/sql/temporary_schema.go +++ b/pkg/sql/temporary_schema.go @@ -50,6 +50,7 @@ import ( // TempObjectCleanupInterval is a ClusterSetting controlling how often // temporary objects get cleaned up. 
var TempObjectCleanupInterval = settings.RegisterDurationSetting( + settings.TenantWritable, "sql.temp_object_cleaner.cleanup_interval", "how often to clean up orphaned temporary objects", 30*time.Minute, @@ -58,6 +59,7 @@ var TempObjectCleanupInterval = settings.RegisterDurationSetting( // TempObjectWaitInterval is a ClusterSetting controlling how long // after a creation a temporary object will be cleaned up. var TempObjectWaitInterval = settings.RegisterDurationSetting( + settings.TenantWritable, "sql.temp_object_cleaner.wait_interval", "how long after creation a temporary object will be cleaned up", 30*time.Minute, diff --git a/pkg/sql/truncate.go b/pkg/sql/truncate.go index b4ff3bb28489..787dc12c8b27 100644 --- a/pkg/sql/truncate.go +++ b/pkg/sql/truncate.go @@ -152,6 +152,7 @@ func (t *truncateNode) Close(context.Context) {} // split points that we re-create on a table after a truncate. It's scaled by // the number of nodes in the cluster. var PreservedSplitCountMultiple = settings.RegisterIntSetting( + settings.TenantWritable, "sql.truncate.preserved_split_count_multiple", "set to non-zero to cause TRUNCATE to preserve range splits from the "+ "table's indexes. The multiple given will be multiplied with the number of "+ diff --git a/pkg/sql/user.go b/pkg/sql/user.go index d66788f013a1..8bc700587b1b 100644 --- a/pkg/sql/user.go +++ b/pkg/sql/user.go @@ -370,6 +370,7 @@ WHERE } var userLoginTimeout = settings.RegisterDurationSetting( + settings.TenantWritable, "server.user_login.timeout", "timeout after which client authentication times out if some system range is unavailable (0 = no timeout)", 10*time.Second, diff --git a/pkg/storage/engine.go b/pkg/storage/engine.go index adda64b314b8..6bd25981a687 100644 --- a/pkg/storage/engine.go +++ b/pkg/storage/engine.go @@ -1013,12 +1013,14 @@ func ClearRangeWithHeuristic(reader Reader, writer Writer, start, end roachpb.Ke } var ingestDelayL0Threshold = settings.RegisterIntSetting( + settings.TenantWritable, "rocksdb.ingest_backpressure.l0_file_count_threshold", "number of L0 files after which to backpressure SST ingestions", 20, ) var ingestDelayTime = settings.RegisterDurationSetting( + settings.TenantWritable, "rocksdb.ingest_backpressure.max_delay", "maximum amount of time to backpressure a single SST ingestion", time.Second*5, diff --git a/pkg/storage/mvcc.go b/pkg/storage/mvcc.go index 45c3314a18ed..dc608b90f0b2 100644 --- a/pkg/storage/mvcc.go +++ b/pkg/storage/mvcc.go @@ -66,6 +66,7 @@ var ( ) var minWALSyncInterval = settings.RegisterDurationSetting( + settings.TenantWritable, "rocksdb.min_wal_sync_interval", "minimum duration between syncs of the RocksDB WAL", 0*time.Millisecond, @@ -75,6 +76,7 @@ var minWALSyncInterval = settings.RegisterDurationSetting( // WriteIntentError in operations that return multiple intents per error. // Currently it is used in Scan, ReverseScan, and ExportToSST. var MaxIntentsPerWriteIntentError = settings.RegisterIntSetting( + settings.TenantWritable, "storage.mvcc.max_intents_per_error", "maximum number of intents returned in error during export of scan requests", maxIntentsPerWriteIntentErrorDefault) diff --git a/pkg/storage/pebble.go b/pkg/storage/pebble.go index 4c5bb0db2959..fc7f1b598a64 100644 --- a/pkg/storage/pebble.go +++ b/pkg/storage/pebble.go @@ -62,6 +62,7 @@ var maxSyncDurationDefault = envutil.EnvOrDefaultDuration("COCKROACH_ENGINE_MAX_ // MaxSyncDuration is the threshold above which an observed engine sync duration // triggers either a warning or a fatal error. 
var MaxSyncDuration = settings.RegisterDurationSetting( + settings.TenantWritable, "storage.max_sync_duration", "maximum duration for disk operations; any operations that take longer"+ " than this setting trigger a warning log entry or process crash", @@ -71,6 +72,7 @@ // MaxSyncDurationFatalOnExceeded governs whether disk stalls longer than // MaxSyncDuration fatal the Cockroach process. Defaults to true. var MaxSyncDurationFatalOnExceeded = settings.RegisterBoolSetting( + settings.TenantWritable, "storage.max_sync_duration.fatal.enabled", "if true, fatal the process when a disk operation exceeds storage.max_sync_duration", maxSyncDurationFatalOnExceededDefault, diff --git a/pkg/ts/db.go b/pkg/ts/db.go index 4e22beb39e09..c569b9181d59 100644 --- a/pkg/ts/db.go +++ b/pkg/ts/db.go @@ -40,6 +40,7 @@ var ( // TimeseriesStorageEnabled controls whether to store timeseries data to disk. var TimeseriesStorageEnabled = settings.RegisterBoolSetting( + settings.TenantWritable, "timeseries.storage.enabled", "if set, periodic timeseries data is stored within the cluster; disabling is not recommended "+ "unless you are storing the data elsewhere", @@ -50,6 +51,7 @@ var TimeseriesStorageEnabled = settings.RegisterBoolSetting( // at the 10 second resolution. Data older than this is subject to being "rolled // up" into the 30 minute resolution and then deleted. var Resolution10sStorageTTL = settings.RegisterDurationSetting( + settings.TenantWritable, "timeseries.storage.resolution_10s.ttl", "the maximum age of time series data stored at the 10 second resolution. Data older than this "+ "is subject to rollup and deletion.", @@ -59,6 +61,7 @@ var Resolution10sStorageTTL = settings.RegisterDurationSetting( // deprecatedResolution30StoreDuration is retained for backward compatibility during a version upgrade. var deprecatedResolution30StoreDuration = func() *settings.DurationSetting { s := settings.RegisterDurationSetting( + settings.TenantWritable, "timeseries.storage.30m_resolution_ttl", "replaced by timeseries.storage.resolution_30m.ttl", resolution30mDefaultPruneThreshold, ) @@ -77,6 +80,7 @@ func init() { // retained at the 30 minute resolution. Data older than this is subject to // deletion. var Resolution30mStorageTTL = settings.RegisterDurationSetting( + settings.TenantWritable, "timeseries.storage.resolution_30m.ttl", "the maximum age of time series data stored at the 30 minute resolution. Data older than this "+ "is subject to deletion.", diff --git a/pkg/util/admission/granter.go b/pkg/util/admission/granter.go index 132c654ec797..8d2b3fea08a2 100644 --- a/pkg/util/admission/granter.go +++ b/pkg/util/admission/granter.go @@ -30,6 +30,7 @@ import ( // which the CPU will be considered overloaded, when running in a node that // executes KV operations. var KVSlotAdjusterOverloadThreshold = settings.RegisterIntSetting( + settings.TenantWritable, "admission.kv_slot_adjuster.overload_threshold", "when the number of runnable goroutines per CPU is greater than this threshold, the "+ "slot adjuster considers the cpu to be overloaded", @@ -38,6 +39,7 @@ var KVSlotAdjusterOverloadThreshold = settings.RegisterIntSetting( // L0FileCountOverloadThreshold sets a file count threshold that signals an // overloaded store.
var L0FileCountOverloadThreshold = settings.RegisterIntSetting( + settings.TenantWritable, "admission.l0_file_count_overload_threshold", "when the L0 file count exceeds this threshold, the store is considered overloaded", l0FileCountOverloadThreshold, settings.PositiveInt) @@ -45,6 +47,7 @@ var L0FileCountOverloadThreshold = settings.RegisterIntSetting( // L0SubLevelCountOverloadThreshold sets a sub-level count threshold that // signals an overloaded store. var L0SubLevelCountOverloadThreshold = settings.RegisterIntSetting( + settings.TenantWritable, "admission.l0_sub_level_count_overload_threshold", "when the L0 sub-level count exceeds this threshold, the store is considered overloaded", l0SubLevelCountOverloadThreshold, settings.PositiveInt) diff --git a/pkg/util/admission/work_queue.go b/pkg/util/admission/work_queue.go index 3a83aad59f92..25052dcb7563 100644 --- a/pkg/util/admission/work_queue.go +++ b/pkg/util/admission/work_queue.go @@ -34,6 +34,7 @@ import ( // KVAdmissionControlEnabled controls whether KV server-side admission control // is enabled. var KVAdmissionControlEnabled = settings.RegisterBoolSetting( + settings.TenantWritable, "admission.kv.enabled", "when true, work performed by the KV layer is subject to admission control", true).WithPublic() @@ -41,6 +42,7 @@ var KVAdmissionControlEnabled = settings.RegisterBoolSetting( // SQLKVResponseAdmissionControlEnabled controls whether response processing // in SQL, for KV requests, is enabled. var SQLKVResponseAdmissionControlEnabled = settings.RegisterBoolSetting( + settings.TenantWritable, "admission.sql_kv_response.enabled", "when true, work performed by the SQL layer when receiving a KV response is subject to "+ "admission control", @@ -49,6 +51,7 @@ var SQLKVResponseAdmissionControlEnabled = settings.RegisterBoolSetting( // SQLSQLResponseAdmissionControlEnabled controls whether response processing // in SQL, for DistSQL requests, is enabled.
var SQLSQLResponseAdmissionControlEnabled = settings.RegisterBoolSetting( + settings.TenantWritable, "admission.sql_sql_response.enabled", "when true, work performed by the SQL layer when receiving a DistSQL response is subject "+ "to admission control", diff --git a/pkg/util/log/BUILD.bazel b/pkg/util/log/BUILD.bazel index 64eee95868fa..808e522aeae0 100644 --- a/pkg/util/log/BUILD.bazel +++ b/pkg/util/log/BUILD.bazel @@ -26,6 +26,7 @@ go_library( "format_crdb_v2.go", "format_json.go", "formats.go", + "formattable_tags.go", "get_stacks.go", "http_sink.go", "intercept.go", @@ -142,6 +143,8 @@ go_test( "format_crdb_v1_test.go", "format_crdb_v2_test.go", "format_json_test.go", + "formats_test.go", + "formattable_tags_test.go", "helpers_test.go", "http_sink_test.go", "intercept_test.go", diff --git a/pkg/util/log/clog_test.go b/pkg/util/log/clog_test.go index dfe4b47ec942..38a88cdc67d2 100644 --- a/pkg/util/log/clog_test.go +++ b/pkg/util/log/clog_test.go @@ -651,7 +651,7 @@ func TestExitOnFullDisk(t *testing.T) { fs := &fileSink{} l := &loggerT{sinkInfos: []*sinkInfo{{ sink: fs, - editor: func(r redactablePackage) redactablePackage { return r }, + editor: getEditor(SelectEditMode(false /* redact */, true /* redactable */)), criticality: true, }}} fs.mu.file = &syncBuffer{ @@ -738,7 +738,7 @@ func TestLogEntryPropagation(t *testing.T) { } func BenchmarkLogEntry_String(b *testing.B) { - tagbuf := logtags.SingleTagBuffer("foo", "bar") + ctxtags := logtags.AddTag(context.Background(), "foo", "bar") entry := &logEntry{ idPayload: idPayload{ clusterID: "fooo", @@ -754,10 +754,10 @@ func BenchmarkLogEntry_String(b *testing.B) { file: "foo.go", line: 192, counter: 12, - tags: tagbuf, stacks: nil, structured: false, payload: entryPayload{ + tags: makeFormattableTags(ctxtags, false), redactable: false, message: "hello there", }, diff --git a/pkg/util/log/format_crdb_v2.go b/pkg/util/log/format_crdb_v2.go index f12bcdb991bc..a5de3f374ca7 100644 --- a/pkg/util/log/format_crdb_v2.go +++ b/pkg/util/log/format_crdb_v2.go @@ -286,9 +286,9 @@ func formatLogEntryInternalV2(entry logEntry, cp ttycolor.Profile) *buffer { // Display the tags if set. buf.Write(cp[ttycolor.Blue]) - if entry.tags != nil { + if entry.payload.tags != nil { buf.WriteByte('[') - buf.WriteString(renderTagsAsString(entry.tags, entry.payload.redactable)) + entry.payload.tags.formatToBuffer(buf) buf.WriteByte(']') } else { buf.WriteString("[-]") diff --git a/pkg/util/log/format_json.go b/pkg/util/log/format_json.go index a324b9ae8fe9..584e6e9ccf86 100644 --- a/pkg/util/log/format_json.go +++ b/pkg/util/log/format_json.go @@ -369,25 +369,9 @@ func formatJSON(entry logEntry, forFluent bool, tags tagChoice) *buffer { } // Tags. - if entry.tags != nil { + if entry.payload.tags != nil { buf.WriteString(`,"tags":{`) - comma := `"` - for _, t := range entry.tags.Get() { - buf.WriteString(comma) - escapeString(buf, t.Key()) - buf.WriteString(`":"`) - if v := t.Value(); v != nil && v != "" { - var r string - if entry.payload.redactable { - r = string(redact.Sprint(v)) - } else { - r = fmt.Sprint(v) - } - escapeString(buf, r) - } - buf.WriteByte('"') - comma = `,"` - } + entry.payload.tags.formatJSONToBuffer(buf) buf.WriteByte('}') } diff --git a/pkg/util/log/formats_test.go b/pkg/util/log/formats_test.go new file mode 100644 index 000000000000..1a31e1ba3363 --- /dev/null +++ b/pkg/util/log/formats_test.go @@ -0,0 +1,142 @@ +// Copyright 2021 The Cockroach Authors. 
+// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package log + +import ( + "bytes" + "context" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "sort" + "strconv" + "testing" + + "github.com/cockroachdb/cockroach/pkg/util/leaktest" + "github.com/cockroachdb/cockroach/pkg/util/log/channel" + "github.com/cockroachdb/cockroach/pkg/util/log/logconfig" + "github.com/cockroachdb/logtags" + "github.com/cockroachdb/redact" + "github.com/stretchr/testify/require" +) + +func TestFormatRedaction(t *testing.T) { + defer leaktest.AfterTest(t)() + + sc := ScopeWithoutShowLogs(t) + defer sc.Close(t) + + // Make the test below deterministic. + formatNames := make([]string, 0, len(formatters)) + for n := range formatters { + formatNames = append(formatNames, n) + } + sort.Strings(formatNames) + + ctx := context.Background() + ctx = logtags.AddTag(ctx, "a", "secret1") + ctx = logtags.AddTag(ctx, "b", redact.Sprintf("safe1 %s", "secret2")) + + for _, formatName := range formatNames { + t.Run(formatName, func(t *testing.T) { + for _, redactable := range []bool{false, true} { + t.Run(fmt.Sprintf("redactable=%v", redactable), func(t *testing.T) { + for _, redactOut := range []bool{false, true} { + t.Run(fmt.Sprintf("redact=%v", redactOut), func(t *testing.T) { + subdir := filepath.Join(sc.logDir, formatName) + require.NoError(t, os.MkdirAll(subdir, 0755)) + + // Create a config that sends DEV to the subdirectory, + // with the format being tested. + config := logconfig.DefaultConfig() + config.Sinks.FileGroups = map[string]*logconfig.FileSinkConfig{ + "test": { + FileDefaults: logconfig.FileDefaults{ + Dir: &subdir, + CommonSinkConfig: logconfig.CommonSinkConfig{ + Format: &formatName, + Redactable: &redactable, + Redact: &redactOut, + }, + }, + Channels: logconfig.SelectChannels(channel.DEV), + }, + } + config.CaptureFd2.Enable = false + // Validate and apply the config. + require.NoError(t, config.Validate(&sc.logDir)) + TestingResetActive() + cleanupFn, err := ApplyConfig(config) + require.NoError(t, err) + defer cleanupFn() + + Infof(ctx, "safe2 %s", "secret3") + Flush() + + debugFileSink := debugLog.getFileSink() + contents, err := ioutil.ReadFile(debugFileSink.mu.file.(*syncBuffer).file.Name()) + require.NoError(t, err) + require.Greater(t, len(contents), 0) + lastLineStart := bytes.LastIndexByte(contents[:len(contents)-1], '\n') + require.Greater(t, lastLineStart, 0) + lastLine := contents[lastLineStart:] + + t.Logf("%s", lastLine) + + // Expect the safe message regardless of redaction configuration. + for i := 1; i <= 2; i++ { + toFind := "safe" + strconv.Itoa(i) + if !bytes.Contains(lastLine, []byte(toFind)) { + t.Errorf("expected %q in string:\n%s", toFind, lastLine) + } + } + + if !redactOut { + // Secrets should be preserved. + for i := 1; i <= 3; i++ { + toFind := "secret" + strconv.Itoa(i) + if !bytes.Contains(lastLine, []byte(toFind)) { + t.Errorf("expected %q in string:\n%s", toFind, lastLine) + } + } + } else { + // Secrets should be redacted out. + if bytes.Contains(lastLine, []byte("secret")) { + t.Errorf("secret not redacted:\n%s", lastLine) + } + } + + if redactable { + // Output should still contain redaction markers. 
+ if redactOut { + if !bytes.Contains(lastLine, redact.StartMarker()) { + t.Errorf("markers missing from redactable output:\n%s", lastLine) + } + } else { + if !bytes.Contains(lastLine, append(redact.StartMarker(), "secret"...)) { + t.Errorf("secrets missing from redactable output:\n%s", lastLine) + } + } + } else { + // Output should escape redaction markers. + if bytes.Contains(lastLine, redact.StartMarker()) || + bytes.Contains(lastLine, redact.EndMarker()) { + t.Errorf("redaction marker not escaped:\n%s", lastLine) + } + } + }) + } + }) + } + }) + } +} diff --git a/pkg/util/log/formattable_tags.go b/pkg/util/log/formattable_tags.go new file mode 100644 index 000000000000..850d89b8b607 --- /dev/null +++ b/pkg/util/log/formattable_tags.go @@ -0,0 +1,217 @@ +// Copyright 2021 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package log + +import ( + "bytes" + "context" + "fmt" + + "github.com/cockroachdb/logtags" + "github.com/cockroachdb/redact" + "github.com/cockroachdb/redact/interfaces" +) + +// formattableTags is a memory-efficient encoded representation of +// the logging tags, suitable for re-formatting in different output +// log formats. +// +// Internally, it is a sequence of nul-delimited strings, +// interleaving tag key and value strings. For example: +// {'n', 0, '1', 0, 's', 0, '2', 0} +// to encode e.g. n=1,s=2 +// +// Note that we preserve the invariant that there is always a value +// part for every key, and a nul byte after each part. So we have a +// guarantee that the number of nul bytes is even and all parts are +// terminated. +// +// If the formattableTags was constructed for a redactable +// entry, the value sub-strings are really RedactableStrings. +// If the entry is not redactable, the sub-strings are not +// RedactableStrings and should be escaped if emitted +// in a redactable context. +type formattableTags []byte + +func makeFormattableTags(ctx context.Context, redactable bool) (res formattableTags) { + tBuf := logtags.FromContext(ctx) + if tBuf == nil { + return nil + } + for _, t := range tBuf.Get() { + // The key is always considered safe. + // + // TODO(obs-inf/server): this assumes that log tag keys are safe, + // but this is not enforced throughout the code base. We could + // lint that it is true similar to how we lint that the format + // strings for `log.Infof` etc are const strings. + res = append(res, t.Key()...) + res = append(res, 0) + + // Encode the value. If there's no value, skip it. If it's the + // (common) empty string, don't bother - we know it's going to + // produce an empty encoding. This latter case is an optimization. + if val := t.Value(); val != nil && val != "" { + if !redactable { + // Pass the value as-is. + res = escapeNulBytes(res, fmt.Sprint(val)) + } else { + // Make the value redactable, which adds redaction markers around unsafe bits. + res = escapeNulBytes(res, string(redact.Sprint(val))) + } + } + res = append(res, 0) + } + return res +} + +func escapeNulBytes(res []byte, s string) []byte { + k := 0 + for i := 0; i < len(s); i++ { + if s[i] == 0 { + res = append(res, s[k:i]...) + res = append(res, '?') + k = i + 1 + } + } + res = append(res, s[k:]...)
+ return res +} + +// formattableTagsIterator is a helper for the various formatting functions below. +type formattableTagsIterator struct { + tags []byte +} + +// next advances the iterator to the next key/val pair. +func (i *formattableTagsIterator) next() (key, val []byte, done bool) { + if len(i.tags) == 0 { + return nil, nil, true + } + // Advance the cursor to the beginning of the next value. + // No need to check for the value -1 because of the invariant + // that all parts are nul-terminated. + nv := bytes.IndexByte(i.tags, 0) + // The key is everything so far. + key = i.tags[:nv] + // Truncate the buffer to the value and everything after. + i.tags = i.tags[nv+1:] + // Advance the cursor to the beginning of the next key. + // Ditto invariant. + nk := bytes.IndexByte(i.tags, 0) + // The value is everything so far. + val = i.tags[:nk] + // Truncate the buffer to the next key and everything after. + i.tags = i.tags[nk+1:] + return key, val, false +} + +// redactTagValues redacts the values entirely. This is used when +// converting a redactable=false entry to redacted=true output. +func (f formattableTags) redactTagValues(preserveMarkers bool) (res formattableTags) { + // Heuristic: the output is usually not longer than the input. + // (It could be longer in the case when a value string is shorter than the redacted marker.) + res = make([]byte, 0, len(f)) + marker := redactedMarker + if !preserveMarkers { + marker = strippedMarker + } + fi := formattableTagsIterator{tags: []byte(f)} + for { + key, val, done := fi.next() + if done { + break + } + res = append(res, key...) + res = append(res, 0) + if len(val) > 0 { + res = append(res, marker...) + } + res = append(res, 0) + } + return res +} + +// formatToSafeWriter emits the tags to a safe writer, which means preserving +// redaction markers that were there to start with, if any, while being careful +// not to introduce imbalanced redaction markers. +func (f formattableTags) formatToSafeWriter(w interfaces.SafeWriter, redactable bool) { + fi := formattableTagsIterator{tags: []byte(f)} + for i := 0; ; i++ { + key, val, done := fi.next() + if done { + break + } + if i > 0 { + w.SafeRune(',') + } + // The key part is always considered safe. + w.Print(redact.RedactableBytes(key)) + if len(val) > 0 { + if len(key) != 1 { + // We skip the `=` sign for 1-letter keys: we write "n1" in logs, not "n=1". + w.SafeRune('=') + } + if redactable { + // If the entry was redactable to start with, the value part + // is redactable already and can be passed through as-is. + w.Print(redact.RedactableBytes(val)) + } else { + // Otherwise, the value part is unsafe and must be escaped. + w.Print(string(val)) + } + } + } +} + +// formatToBuffer emits the key=value pairs to the output buffer +// separated by commas. +func (f formattableTags) formatToBuffer(buf *buffer) { + fi := formattableTagsIterator{tags: []byte(f)} + for i := 0; ; i++ { + key, val, done := fi.next() + if done { + break + } + if i > 0 { + buf.WriteByte(',') + } + buf.Write(key) + if len(val) > 0 { + if len(key) != 1 { + // We skip the `=` sign for 1-letter keys: we write "n1" in logs, not "n=1". + buf.WriteByte('=') + } + buf.Write(val) + } + } +} + +// formatJSONToBuffer emits the "key":"value" pairs to the output buffer +// separated by commas, in JSON. Special JSON characters in the +// keys/values get escaped.
+func (f formattableTags) formatJSONToBuffer(buf *buffer) { + fi := formattableTagsIterator{tags: []byte(f)} + for i := 0; ; i++ { + key, val, done := fi.next() + if done { + break + } + if i > 0 { + buf.WriteByte(',') + } + buf.WriteByte('"') + escapeString(buf, string(key)) + buf.WriteString(`":"`) + escapeString(buf, string(val)) + buf.WriteByte('"') + } +} diff --git a/pkg/util/log/formattable_tags_test.go b/pkg/util/log/formattable_tags_test.go new file mode 100644 index 000000000000..ec727dce4f05 --- /dev/null +++ b/pkg/util/log/formattable_tags_test.go @@ -0,0 +1,109 @@ +// Copyright 2021 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package log + +import ( + "context" + "testing" + + "github.com/cockroachdb/logtags" + "github.com/cockroachdb/redact" + "github.com/stretchr/testify/assert" +) + +func TestFormattableTags(t *testing.T) { + testCases := []struct { + ctx context.Context + redactable bool + safe redact.RedactableString + buf string + json string + rvals string + }{ + { + ctx: context.Background(), + redactable: false, + safe: "", buf: "", json: "", rvals: "", + }, + { + ctx: context.Background(), + redactable: true, + safe: "", buf: "", json: "", rvals: "", + }, + { + ctx: logtags.AddTag(logtags.AddTag(logtags.AddTag(logtags.AddTag(logtags.AddTag(context.Background(), + "noval", nil), + "n", 1), + "m", "uns‹afe"), + "z", "x\x00y"), + "long", redact.Sprintf(`safe "%s"`, "unsafe")), + redactable: false, + // Because the entry is not redactable to start with, when + // emitting in a safe writer context, all the value strings are + // considered unsafe and any special characters, e.g. redaction + // markers, get escaped. + safe: `noval,n‹1›,m‹uns?afe›,z‹x?y›,long=‹safe "?unsafe?"›`, + // Because the entry is not redactable, when emitting raw + // we do not care about escaping. So it's possible for + // redaction markers to be unbalanced. + buf: `noval,n1,muns‹afe,zx?y,long=safe "‹unsafe›"`, + // Ditto for json. + json: `"noval":"","n":"1","m":"uns‹afe","z":"x?y","long":"safe \"‹unsafe›\""`, + // Redacted values everywhere. + rvals: "noval,n×,m×,z×,long=×", + }, + { + ctx: logtags.AddTag(logtags.AddTag(logtags.AddTag(logtags.AddTag(logtags.AddTag(context.Background(), + "noval", nil), + "n", 1), + "m", "uns‹afe"), + "z", "x\x00y"), + "long", redact.Sprintf(`safe "%s"`, "unsafe")), + redactable: true, + // The entry is redactable, so we can do the right thing in the various output contexts. + safe: `noval,n1,m‹uns?afe›,z‹x?y›,long=safe "‹unsafe›"`, + buf: `noval,n1,m‹uns?afe›,z‹x?y›,long=safe "‹unsafe›"`, + json: `"noval":"","n":"1","m":"‹uns?afe›","z":"‹x?y›","long":"safe \"‹unsafe›\""`, + // In any case, redacted values everywhere. 
+ rvals: "noval,n×,m×,z×,long=×", + }, + } + + for i, tc := range testCases { + tags := makeFormattableTags(tc.ctx, tc.redactable) + + var rbuf redact.StringBuilder + tags.formatToSafeWriter(&rbuf, tc.redactable) + assert.Equal(t, tc.safe, rbuf.RedactableString(), "safeprint %d", i) + + var buf buffer + tags.formatToBuffer(&buf) + assert.Equal(t, tc.buf, buf.String(), "bufprint %d", i) + + buf = buffer{} + tags.formatJSONToBuffer(&buf) + assert.Equal(t, tc.json, buf.String(), "jsonprint %d", i) + + buf = buffer{} + rtags := tags.redactTagValues(false /* preserve markers */) + rtags.formatToBuffer(&buf) + + assert.Equal(t, tc.rvals, buf.String(), "redactvals %d", i) + } +} + +func TestEscapeNulBytes(t *testing.T) { + assert.Equal(t, string(escapeNulBytes(nil, "")), "") + assert.Equal(t, string(escapeNulBytes(nil, "\x00")), "?") + assert.Equal(t, string(escapeNulBytes(nil, "abc\x00")), "abc?") + assert.Equal(t, string(escapeNulBytes(nil, "\x00abc")), "?abc") + assert.Equal(t, string(escapeNulBytes(nil, "aa\x00bb\x00\x00cc")), "aa?bb??cc") +} diff --git a/pkg/util/log/log_bridge.go b/pkg/util/log/log_bridge.go index 749665aeefde..03b551592709 100644 --- a/pkg/util/log/log_bridge.go +++ b/pkg/util/log/log_bridge.go @@ -76,7 +76,9 @@ func (lb logBridge) Write(b []byte) (n int, err error) { return len(b), nil } - entry := makeUnstructuredEntry(context.Background(), + ctx := context.Background() + + entry := makeUnstructuredEntry(ctx, Severity(lb), // Note: because the caller is using the stdLog interface, we don't // really know what is being logged. Therefore we must use the @@ -89,18 +91,18 @@ func (lb logBridge) Write(b []byte) (n int, err error) { // Split "d.go:23: message" into "d.go", "23", and "message". if parts := bytes.SplitN(b, []byte{':'}, 3); len(parts) != 3 || len(parts[0]) < 1 || len(parts[2]) < 1 { - entry.payload = makeRedactablePayload(redact.Sprintf("bad log format: %s", b)) + entry.payload = makeRedactablePayload(ctx, redact.Sprintf("bad log format: %s", b)) } else { // We use a "(gostd)" prefix so that these log lines correctly point // to the go standard library instead of our own source directory. entry.file = "(gostd) " + string(parts[0]) lineno, err := strconv.ParseInt(string(parts[1]), 10, 64) if err != nil { - entry.payload = makeRedactablePayload(redact.Sprintf("bad line number: %s", b)) + entry.payload = makeRedactablePayload(ctx, redact.Sprintf("bad line number: %s", b)) lineno = 1 } else { payload := parts[2][1 : len(parts[2])-1] // skip leading space and trailing newline - entry.payload = makeRedactablePayload(redact.Sprintf("%s", payload)) + entry.payload = makeRedactablePayload(ctx, redact.Sprintf("%s", payload)) } entry.line = int(lineno) } diff --git a/pkg/util/log/log_entry.go b/pkg/util/log/log_entry.go index a8d411fee5c3..f980ac234cb3 100644 --- a/pkg/util/log/log_entry.go +++ b/pkg/util/log/log_entry.go @@ -75,9 +75,6 @@ type logEntry struct { // The entry counter. Populated by outputLogEntry(). counter uint64 - // The logging tags. - tags *logtags.Buffer - // The stack trace(s), when processing e.g. a fatal event. stacks []byte @@ -103,23 +100,9 @@ func (e *logEntry) SafeFormat(w interfaces.SafePrinter, _ rune) { w.SafeInt(redact.SafeInt(e.line)) w.SafeRune(' ') } - if e.tags != nil { - w.SafeString("[") - for i, tag := range e.tags.Get() { - if i > 0 { - w.SafeString(",") - } - // TODO(obs-inf/server): this assumes that log tag keys are safe, but this - // is not enforced. 
We could lint that it is true similar to how we lint - // that the format strings for `log.Infof` etc are const strings. - k := redact.SafeString(tag.Key()) - v := tag.Value() - w.SafeString(k) - if v != nil { - w.SafeRune('=') - w.Print(tag.Value()) - } - } + if e.payload.tags != nil { + w.SafeRune('[') + e.payload.tags.formatToSafeWriter(w, e.payload.redactable) w.SafeString("] ") } @@ -166,9 +149,6 @@ func (e *logEntry) String() string { } type entryPayload struct { - // Whether the payload is redactable or not. - redactable bool - // The actual payload string. // For structured entries, this is the JSON // representation of the payload fields, without the @@ -179,14 +159,33 @@ type entryPayload struct { // in disguise. If it is false, message is a flat string with // no guarantees about content. message string + + // The tags, in a formattable representation. + // + // If redactable below is true, the value part of the + // formattableTags is encoded as a RedactableString. If redactable + // is false, the value part is raw and can contain redaction + // markers. (Same as message above.) + tags formattableTags + + // Whether the payload message is redactable or not. + redactable bool } -func makeRedactablePayload(m redact.RedactableString) entryPayload { - return entryPayload{redactable: true, message: string(m)} +func makeRedactablePayload(ctx context.Context, m redact.RedactableString) entryPayload { + return entryPayload{ + message: string(m), + tags: makeFormattableTags(ctx, true /* redactable */), + redactable: true, + } } -func makeUnsafePayload(m string) entryPayload { - return entryPayload{redactable: false, message: m} +func makeUnsafePayload(ctx context.Context, m string) entryPayload { + return entryPayload{ + message: m, + tags: makeFormattableTags(ctx, false /* redactable */), + redactable: false, + } } // makeEntry creates a logEntry. @@ -200,7 +199,6 @@ func makeEntry(ctx context.Context, s Severity, c Channel, depth int) (res logEn ch: c, version: build.BinaryVersion(), gid: goid.Get(), - tags: logtags.FromContext(ctx), } // Populate file/lineno. @@ -217,7 +215,7 @@ func makeStructuredEntry( res.structured = true _, b := payload.AppendJSONFields(false, nil) - res.payload = makeRedactablePayload(b.ToString()) + res.payload = makeRedactablePayload(ctx, b.ToString()) return res } @@ -245,23 +243,23 @@ func makeUnstructuredEntry( } else { buf.Printf(format, args...) } - res.payload = makeRedactablePayload(buf.RedactableString()) + res.payload = makeRedactablePayload(ctx, buf.RedactableString()) } else { var buf strings.Builder formatArgs(&buf, format, args...) - res.payload = makeUnsafePayload(buf.String()) + res.payload = makeUnsafePayload(ctx, buf.String()) } return res } -var configTagsBuffer = logtags.SingleTagBuffer("config", nil) +var configTagsCtx = logtags.AddTag(context.Background(), "config", nil) // makeStartLine creates a formatted log entry suitable for the start // of a logging output using the canonical logging format. func makeStartLine(formatter logFormatter, format string, args ...interface{}) *buffer { entry := makeUnstructuredEntry( - context.Background(), + configTagsCtx, severity.UNKNOWN, /* header - ignored */ 0, /* header - ignored */ 2, /* depth */ @@ -269,7 +267,6 @@ func makeStartLine(formatter logFormatter, format string, args ...interface{}) * format, args...) 
entry.header = true - entry.tags = configTagsBuffer return formatter.formatEntry(entry) } @@ -311,8 +308,10 @@ func (e logEntry) convertToLegacy() (res logpb.Entry) { Message: e.payload.message, } - if e.tags != nil { - res.Tags = renderTagsAsString(e.tags, e.payload.redactable) + if e.payload.tags != nil { + var buf buffer + e.payload.tags.formatToBuffer(&buf) + res.Tags = buf.String() } if e.structured { @@ -334,15 +333,6 @@ func (e logEntry) convertToLegacy() (res logpb.Entry) { const structuredEntryPrefix = "Structured entry: " -func renderTagsAsString(tags *logtags.Buffer, redactable bool) string { - if redactable { - return string(renderTagsAsRedactable(tags)) - } - var buf strings.Builder - tags.FormatToString(&buf) - return buf.String() -} - // MakeLegacyEntry creates an logpb.Entry. func MakeLegacyEntry( ctx context.Context, diff --git a/pkg/util/log/logcrash/crash_reporting.go b/pkg/util/log/logcrash/crash_reporting.go index f56344f259e4..561534b10fef 100644 --- a/pkg/util/log/logcrash/crash_reporting.go +++ b/pkg/util/log/logcrash/crash_reporting.go @@ -52,6 +52,7 @@ var ( // Doing this, rather than just using a default of `true`, means that a node // will not errantly send a report using a default before loading settings. DiagnosticsReportingEnabled = settings.RegisterBoolSetting( + settings.TenantWritable, "diagnostics.reporting.enabled", "enable reporting diagnostic metrics to cockroach labs", false, @@ -59,6 +60,7 @@ var ( // CrashReports wraps "diagnostics.reporting.send_crash_reports". CrashReports = settings.RegisterBoolSetting( + settings.TenantWritable, "diagnostics.reporting.send_crash_reports", "send crash and panic reports", true, @@ -66,6 +68,7 @@ var ( // PanicOnAssertions wraps "debug.panic_on_failed_assertions" PanicOnAssertions = settings.RegisterBoolSetting( + settings.TenantWritable, "debug.panic_on_failed_assertions", "panic when an assertion fails rather than reporting", false, diff --git a/pkg/util/log/redact.go b/pkg/util/log/redact.go index 164d4607eb89..f30703dbbfb5 100644 --- a/pkg/util/log/redact.go +++ b/pkg/util/log/redact.go @@ -17,7 +17,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/encoding/encodingtype" "github.com/cockroachdb/errors" - "github.com/cockroachdb/logtags" "github.com/cockroachdb/redact" ) @@ -76,6 +75,7 @@ func getEditor(editMode EditSensitiveData) redactEditor { return func(r redactablePackage) redactablePackage { if !r.redactable { r.msg = []byte(redact.EscapeBytes(r.msg)) + r.tags = formattableTags(redact.EscapeBytes([]byte(r.tags))) r.redactable = true } return r @@ -84,6 +84,7 @@ func getEditor(editMode EditSensitiveData) redactEditor { return func(r redactablePackage) redactablePackage { if r.redactable { r.msg = redact.RedactableBytes(r.msg).StripMarkers() + r.tags = formattableTags(redact.RedactableBytes(r.tags).StripMarkers()) r.redactable = false } return r @@ -92,8 +93,10 @@ func getEditor(editMode EditSensitiveData) redactEditor { return func(r redactablePackage) redactablePackage { if r.redactable { r.msg = []byte(redact.RedactableBytes(r.msg).Redact()) + r.tags = formattableTags(redact.RedactableBytes(r.tags).Redact()) } else { r.msg = redact.RedactedMarker() + r.tags = r.tags.redactTagValues(true /* preserveMarkers */) r.redactable = true } return r @@ -102,9 +105,11 @@ func getEditor(editMode EditSensitiveData) redactEditor { return func(r redactablePackage) redactablePackage { if r.redactable { r.msg = redact.RedactableBytes(r.msg).Redact().StripMarkers() + r.tags = 
formattableTags(redact.RedactableBytes(r.tags).Redact().StripMarkers()) r.redactable = false } else { r.msg = strippedMarker + r.tags = r.tags.redactTagValues(false /* preserveMarkers */) } return r } @@ -113,22 +118,24 @@ func getEditor(editMode EditSensitiveData) redactEditor { } } -var strippedMarker = redact.RedactableBytes(redact.RedactedMarker()).StripMarkers() +var redactedMarker = redact.RedactedMarker() +var strippedMarker = redact.RedactableBytes(redactedMarker).StripMarkers() // maybeRedactEntry transforms a logpb.Entry to either strip // sensitive data or keep it, or strip the redaction markers or keep them, // or a combination of both. The specific behavior is selected // by the provided redactEditor. -func maybeRedactEntry(payload entryPayload, editor redactEditor) entryPayload { +func maybeRedactEntry(payload entryPayload, editor redactEditor) (res entryPayload) { r := redactablePackage{ redactable: payload.redactable, + tags: payload.tags, msg: []byte(payload.message), } r = editor(r) - return entryPayload{ - redactable: r.redactable, - message: string(r.msg), - } + res.redactable = r.redactable + res.message = string(r.msg) + res.tags = r.tags + return res } // Safe constructs a SafeFormatter / SafeMessager. @@ -167,6 +174,7 @@ func init() { type redactablePackage struct { msg []byte + tags formattableTags redactable bool } @@ -174,26 +182,6 @@ const redactableIndicator = "⋮" var redactableIndicatorBytes = []byte(redactableIndicator) -func renderTagsAsRedactable(tags *logtags.Buffer) redact.RedactableString { - if tags == nil { - return "" - } - var buf redact.StringBuilder - comma := redact.SafeString("") - for _, t := range tags.Get() { - buf.SafeString(comma) - buf.Print(redact.Safe(t.Key())) - if v := t.Value(); v != nil && v != "" { - if len(t.Key()) > 1 { - buf.SafeRune('=') - } - buf.Print(v) - } - comma = "," - } - return buf.RedactableString() -} - // TestingSetRedactable sets the redactable flag on the file output of // the debug logger for usage in a test. The caller is responsible // for calling the cleanup function. This is exported for use in diff --git a/pkg/util/log/redact_test.go b/pkg/util/log/redact_test.go index 65951856642c..b58033795abc 100644 --- a/pkg/util/log/redact_test.go +++ b/pkg/util/log/redact_test.go @@ -21,8 +21,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log/logpb" "github.com/cockroachdb/errors" - "github.com/cockroachdb/logtags" - "github.com/cockroachdb/redact" "github.com/stretchr/testify/assert" ) @@ -92,39 +90,6 @@ func TestRedactedLogOutput(t *testing.T) { } } -func quote(s string) string { - return startRedactable + s + endRedactable -} - -// TestRedactTags ensure that context tags can be redacted. 
-func TestRedactTags(t *testing.T) { - baseCtx := context.Background() - - testData := []struct { - ctx context.Context - expected string - }{ - {baseCtx, ""}, - {logtags.AddTag(baseCtx, "k", nil), "k"}, - {logtags.AddTag(baseCtx, "k", redact.Unsafe(123)), "k" + quote("123") + ""}, - {logtags.AddTag(baseCtx, "k", 123), "k123"}, - {logtags.AddTag(baseCtx, "k", redact.Safe(123)), "k123"}, - {logtags.AddTag(baseCtx, "k", startRedactable), "k" + quote(escapeMark) + ""}, - {logtags.AddTag(baseCtx, "kg", redact.Unsafe(123)), "kg=" + quote("123") + ""}, - {logtags.AddTag(baseCtx, "kg", 123), "kg=123"}, - {logtags.AddTag(baseCtx, "kg", redact.Safe(123)), "kg=123"}, - {logtags.AddTag(logtags.AddTag(baseCtx, "k", nil), "n", redact.Unsafe(55)), "k,n" + quote("55") + ""}, - {logtags.AddTag(logtags.AddTag(baseCtx, "k", nil), "n", 55), "k,n55"}, - {logtags.AddTag(logtags.AddTag(baseCtx, "k", nil), "n", redact.Safe(55)), "k,n55"}, - } - - for _, tc := range testData { - tags := logtags.FromContext(tc.ctx) - actual := renderTagsAsRedactable(tags) - assert.Equal(t, tc.expected, string(actual)) - } -} - func TestRedactedDecodeFile(t *testing.T) { defer leaktest.AfterTest(t)() diff --git a/pkg/util/tracing/tracer.go b/pkg/util/tracing/tracer.go index 68876fc43aaa..1bc4d6aec6ff 100644 --- a/pkg/util/tracing/tracer.go +++ b/pkg/util/tracing/tracer.go @@ -92,6 +92,7 @@ const ( // resolved via #58610, this setting can be removed so that all traces // have redactability enabled. var enableTraceRedactable = settings.RegisterBoolSetting( + settings.TenantWritable, "trace.redactable.enabled", "set to true to enable redactability for unstructured events "+ "in traces and to redact traces sent to tenants. "+ @@ -101,12 +102,14 @@ ) var enableNetTrace = settings.RegisterBoolSetting( + settings.TenantWritable, "trace.debug.enable", "if set, traces for recent requests can be seen at https://<ui>/debug/requests", false, ).WithPublic() var openTelemetryCollector = settings.RegisterValidatedStringSetting( + settings.TenantWritable, "trace.opentelemetry.collector", "address of an OpenTelemetry trace collector to receive "+ "traces using the otel gRPC protocol, as <host>:<port>. "+ @@ -122,6 +125,7 @@ ).WithPublic() var jaegerAgent = settings.RegisterValidatedStringSetting( + settings.TenantWritable, "trace.jaeger.agent", "the address of a Jaeger agent to receive traces using the "+ "Jaeger UDP Thrift protocol, as <host>:<port>. "+ @@ -139,6 +143,7 @@ // ZipkinCollector is the cluster setting that specifies the Zipkin instance // to send traces to, if any. var ZipkinCollector = settings.RegisterValidatedStringSetting( + settings.TenantWritable, "trace.zipkin.collector", "the address of a Zipkin instance to receive traces, as <host>:<port>. "+ "If no port is specified, 9411 will be used.",
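(Illustrative closing aside, not part of the patch: a round-trip sketch for the new formattableTags representation introduced above. It would have to live inside package log, since buffer, makeFormattableTags, and redactTagValues are unexported; the tag names and expected outputs follow the behavior exercised by TestFormattableTags:)

// exampleFormattableTagsRoundTrip encodes context tags once, renders
// them, then redacts the values (markers stripped).
func exampleFormattableTagsRoundTrip() (rendered, redacted string) {
	ctx := logtags.AddTag(context.Background(), "n", 1)
	ctx = logtags.AddTag(ctx, "job", "backup")
	tags := makeFormattableTags(ctx, false /* redactable */)

	var buf buffer
	tags.formatToBuffer(&buf)
	rendered = buf.String() // "n1,job=backup": 1-letter keys elide the '=' sign

	buf = buffer{}
	tags.redactTagValues(false /* preserveMarkers */).formatToBuffer(&buf)
	redacted = buf.String() // "n×,job=×": tag values fully redacted
	return rendered, redacted
}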