From a63b81d08b27d91b37796b5b3e56b4e37536e5a4 Mon Sep 17 00:00:00 2001
From: Raphael 'kena' Poss
Date: Sat, 19 Aug 2023 13:05:23 +0200
Subject: [PATCH] settings,*: give better names to certain settings

Now that we have a framework that allows us to rename cluster settings
while preserving backward compatibility, we can clean up the settings
whose names didn't pass the linter and had been grandfathered. This
also simplifies the linter.

The following have been renamed:

| Previous name                                  | New name                                           |
|------------------------------------------------|----------------------------------------------------|
| `kv.raft_log.disable_synchronization_unsafe`   | `kv.raft_log.synchronization.disabled`             |
| `sql.trace.log_statement_execute`              | `sql.log.all_statements.enabled`                   |
| `cloudstorage.gs.chunking.retry_timeout`       | `cloudstorage.gs.chunking.per_chunk_retry.timeout` |
| `trace.debug.enable`                           | `trace.debug_http_endpoint.enabled`                |
| `bulkio.restore.memory_monitor_ssts`           | `bulkio.restore.sst_memory_limit.enabled`          |
| `bulkio.restore.use_simple_import_spans`       | `bulkio.restore.simple_import_spans.enabled`       |
| `bulkio.backup.export_request_verbose_tracing` | `bulkio.backup.verbose_tracing.enabled`            |
| `changefeed.idle_timeout`                      | `changefeed.auto_idle.timeout`                     |

The following name suffixes have been adjusted to follow our linter
guidelines:

```
bulkio.backup.split_keys_on_timestamps                   - added .enabled
changefeed.balance_range_distribution.enable             - renamed to .enabled
changefeed.batch_reduction_retry_enabled                 - renamed to .enabled
changefeed.new_pubsub_sink_enabled                       - renamed to .enabled
changefeed.new_webhook_sink_enabled                      - renamed to .enabled
changefeed.permissions.require_external_connection_sink - added .enabled
debug.panic_on_failed_assertions                         - added .enabled
diagnostics.reporting.send_crash_reports                 - added .enabled
kv.closed_timestamp.follower_reads_enabled               - renamed to .enabled
kv.range_merge.queue_enabled                             - renamed to .enabled
kv.range_split.by_load_enabled                           - renamed to .enabled
kv.transaction.parallel_commits_enabled                  - renamed to .enabled
kv.transaction.write_pipelining_enabled                  - renamed to .enabled
kv.transaction.write_pipelining_max_batch_size           - renamed to .max_batch_size
server.clock.forward_jump_check_enabled                  - renamed to .enabled
server.failed_reservation_timeout                        - renamed to .timeout
server.oidc_authentication.autologin                     - added .enabled
server.web_session_timeout                               - renamed to .timeout
sql.distsql.flow_stream_timeout                          - renamed to .timeout
sql.metrics.statement_details.dump_to_logs               - added .enabled
stream_replication.job_liveness_timeout                  - renamed to .timeout
```
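Every rename in this patch follows the same mechanical pattern: the
setting keeps its original key, which remains valid for backward
compatibility, and the new user-visible name is attached via
`settings.WithName`. A minimal sketch of that pattern, modeled on the
`server.web_session_timeout` rename from the diff below (imports and
surrounding declarations abbreviated from the actual code):

```go
package authserver

import (
	"time"

	"github.com/cockroachdb/cockroach/pkg/settings"
)

// webSessionTimeout keeps its original key, so stored values and
// existing references continue to work; WithName attaches the new,
// linter-compliant name that is displayed to users.
var webSessionTimeout = settings.RegisterDurationSetting(
	settings.TenantWritable,
	"server.web_session_timeout", // original key, kept for compatibility
	"the duration that a newly created web session will be valid",
	7*24*time.Hour,
	settings.WithName("server.web_session.timeout"), // new public name
	settings.WithPublic,
)
```

Both names address the same setting, so
`SET CLUSTER SETTING server.web_session_timeout = '24h'` keeps working,
while the setting is reported under `server.web_session.timeout`.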
Release note (cli change): The following user-visible cluster settings
have been renamed. The previous name is still available for backward
compatibility.

| Previous name                                  | New name                                              |
|------------------------------------------------|-------------------------------------------------------|
| `server.web_session_timeout`                   | `server.web_session.timeout`                          |
| `kv.closed_timestamp.follower_reads_enabled`   | `kv.closed_timestamp.follower_reads.enabled`          |
| `kv.range_split.by_load_enabled`               | `kv.range_split.by_load.enabled`                      |
| `changefeed.balance_range_distribution.enable` | `changefeed.balance_range_distribution.enabled`       |
| `changefeed.batch_reduction_retry_enabled`     | `changefeed.batch_reduction_retry.enabled`            |
| `server.clock.forward_jump_check_enabled`      | `server.clock.forward_jump_check.enabled`             |
| `server.oidc_authentication.autologin`         | `server.oidc_authentication.autologin.enabled`        |
| `sql.metrics.statement_details.dump_to_logs`   | `sql.metrics.statement_details.dump_to_logs.enabled`  |
| `sql.trace.log_statement_execute`              | `sql.log.all_statements.enabled`                      |
| `trace.debug.enable`                           | `trace.debug_http_endpoint.enabled`                   |
---
 .../20181227_follower_reads_implementation.md |  2 +-
 .../20211106_multitenant_cluster_settings.md  |  2 +-
 docs/generated/eventlog.md                    |  2 +-
 docs/generated/logging.md                     |  2 +-
 .../settings/settings-for-tenants.txt         | 16 ++--
 docs/generated/settings/settings.html         | 20 ++---
 pkg/bench/bench_test.go                       |  2 +-
 pkg/bench/tpcc/subprocess_commands_test.go    |  2 +-
 pkg/bench/tpcc/tpcc_bench_test.go             |  2 +-
 pkg/ccl/backupccl/backup_processor.go         |  2 +
 pkg/ccl/backupccl/backup_test.go              |  8 +-
 .../backupccl/backuprand/backup_rand_test.go  |  4 +-
 pkg/ccl/backupccl/restore_job.go              |  1 +
 .../backupccl/restore_processor_planning.go   |  3 +-
 pkg/ccl/backupccl/utils_test.go               |  2 +-
 .../changefeedccl/alter_changefeed_test.go    |  4 +-
 pkg/ccl/changefeedccl/cdctest/nemeses.go      |  2 +-
 pkg/ccl/changefeedccl/changefeed_dist.go      |  3 +-
 pkg/ccl/changefeedccl/changefeed_test.go      |  4 +-
 .../changefeedccl/changefeedbase/settings.go  |  3 +
 pkg/ccl/changefeedccl/sink.go                 |  6 +-
 pkg/ccl/oidcccl/settings.go                   |  5 +-
 pkg/ccl/partitionccl/partition_test.go        |  4 -
 .../serverccl/diagnosticsccl/reporter_test.go |  3 +-
 .../replicationtestutils/testutils.go         |  2 +-
 pkg/ccl/streamingccl/settings.go              |  1 +
 .../partitioned_stream_client_test.go         |  2 +-
 .../replication_stream_e2e_test.go            |  4 +-
 .../streamproducer/replication_stream_test.go |  8 +-
 .../schema_change_external_test.go            |  2 +-
 pkg/ccl/workloadccl/allccl/all_test.go        |  2 +-
 pkg/cli/interactive_tests/test_exec_log.tcl   |  6 +-
 .../testdata/explain-bundle/bundle/env.sql    | 28 +++---
 pkg/cloud/gcp/gcs_storage.go                  |  1 +
 pkg/cmd/allocsim/main.go                      |  2 +-
 .../githubpost/testdata/stress-failure.json   |  2 +-
 .../githubpost/testdata/stress-fatal.json     |  2 +-
 .../testdata/27595.diff                       |  2 +-
 .../kvcoord/txn_interceptor_committer.go      |  1 +
 .../kvcoord/txn_interceptor_pipeliner.go      |  4 +-
 .../kvcoord/txn_interceptor_pipeliner_test.go |  2 +-
 pkg/kv/kvnemesis/kvnemesis_test.go            |  2 +-
 .../allocator/storepool/store_pool.go         |  1 +
 pkg/kv/kvserver/client_merge_test.go          |  4 +-
 pkg/kv/kvserver/client_replica_test.go        |  2 +-
 pkg/kv/kvserver/closed_timestamp_test.go      |  6 +-
 pkg/kv/kvserver/kvserverbase/base.go          |  1 +
 pkg/kv/kvserver/logstore/logstore.go          |  1 +
 pkg/kv/kvserver/replica_follower_read.go      |  1 +
 pkg/kv/kvserver/replica_learner_test.go       |  6 +-
 pkg/kv/kvserver/replica_rankings_test.go      |  4 +-
 pkg/kv/kvserver/replica_split_load.go         |  3 +-
 pkg/server/authserver/authentication.go       |  1 +
 pkg/server/clock_monotonicity.go              |  3 +-
 pkg/server/import_ts.go                       |  2 +-
 pkg/sql/copy_test.go                          |  2 +-
 pkg/sql/exec_log.go                           |  3 +-
 pkg/sql/flowinfra/flow_registry.go            |  1 +
 .../testdata/logic_test/cluster_settings     | 24 +++++
 .../testdata/logic_test/distsql_subquery     |  2 +-
 .../logictest/testdata/logic_test/event_log  |  2 +-
 .../testdata/logic_test/event_log_legacy     |  2 +-
 .../testdata/logic_test/poison_after_push    |  2 +-
 .../testdata/logic_test/statement_statistics | 12 +--
 pkg/sql/logictest/testdata/logic_test/system |  6 +-
 pkg/sql/logictest/testdata/logic_test/tenant |  6 +-
 pkg/sql/scatter_test.go                      |  2 +-
 pkg/sql/show_test.go                         | 87 ++++++------------
 pkg/sql/sqlstats/cluster_settings.go         |  1 +
 pkg/sql/sqltestutils/telemetry.go            |  2 +-
 pkg/sql/trace_test.go                        |  2 +-
 pkg/testutils/testcluster/testcluster.go     |  4 +-
 pkg/util/log/channel/channel_generated.go    |  2 +-
 pkg/util/log/eventpb/sql_audit_events.proto  |  2 +-
 pkg/util/log/log_channels_generated.go       | 44 +++++-----
 pkg/util/log/logcrash/crash_reporting.go     |  6 +-
 pkg/util/log/logpb/log.proto                 |  2 +-
 pkg/util/tracing/tracer.go                   |  1 +
 pkg/workload/indexes/indexes.go              |  4 +-
 pkg/workload/workloadsql/workloadsql.go      |  4 +-
 80 files changed, 228 insertions(+), 209 deletions(-)

diff --git a/docs/RFCS/20181227_follower_reads_implementation.md b/docs/RFCS/20181227_follower_reads_implementation.md
index 043adb8c041c..54d64c8085c3 100644
--- a/docs/RFCS/20181227_follower_reads_implementation.md
+++ b/docs/RFCS/20181227_follower_reads_implementation.md
@@ -82,7 +82,7 @@ timestamp for use with an `AS OF SYSTEM TIME` query, this RFC introduces a new
 SQL function `follower_read_timestamp()` which is effectively a syntactic
 short-hand for multiplying the above mentioned cluster settings then extends
 `AS OF SYSTEM TIME` to allow for a non-constant expression. After this change and
-the enabling of `kv.closed_timestamp.follower_reads_enabled` clients can
+the enabling of `kv.closed_timestamp.follower_reads.enabled` clients can
 trivially encourage their `SELECT` statements to be directed to physically
 close replicas. For example, imagine that the kv.kv table exists, the below
 query would perform a read against the nearest replica:

diff --git a/docs/RFCS/20211106_multitenant_cluster_settings.md b/docs/RFCS/20211106_multitenant_cluster_settings.md
index 74fb1c03e36e..1f49449f434b 100644
--- a/docs/RFCS/20211106_multitenant_cluster_settings.md
+++ b/docs/RFCS/20211106_multitenant_cluster_settings.md
@@ -41,7 +41,7 @@ Beyond the obvious usability issues, there are important functional gaps:
 
 - in certain cases tenant code may need to consult values for cluster settings
   that apply to the host cluster: for example
-  `kv.closed_timestamp.follower_reads_enabled` applies to the KV subsystem but
+  `kv.closed_timestamp.follower_reads.enabled` applies to the KV subsystem but
   is read by the SQL code when serving queries.
 
 ### Note on SQL settings

diff --git a/docs/generated/eventlog.md b/docs/generated/eventlog.md
index 71db1212e62b..3d15f5e1ddcd 100644
--- a/docs/generated/eventlog.md
+++ b/docs/generated/eventlog.md
@@ -526,7 +526,7 @@ Events in this category are logged to the `SQL_EXEC` channel.
 
 ### `query_execute`
 
 An event of type `query_execute` is recorded when a query is executed,
-and the cluster setting `sql.trace.log_statement_execute` is set.
+and the cluster setting `sql.log.all_statements.enabled` is set.
diff --git a/docs/generated/logging.md b/docs/generated/logging.md
index f306cb2e3dc3..8b56eb6b2008 100644
--- a/docs/generated/logging.md
+++ b/docs/generated/logging.md
@@ -141,7 +141,7 @@ The `SQL_EXEC` channel is used to report SQL execution on behalf of client
 connections:
 
 - Logical SQL statement executions (when enabled via the
-  `sql.trace.log_statement_execute` [cluster setting](cluster-settings.html))
+  `sql.log.all_statements.enabled` [cluster setting](cluster-settings.html))
 
 - uncaught Go panic errors during the execution of a SQL statement.
 
 ### `SQL_PERF`

diff --git a/docs/generated/settings/settings-for-tenants.txt b/docs/generated/settings/settings-for-tenants.txt
index 8689ac8c921f..3f385cab2a60 100644
--- a/docs/generated/settings/settings-for-tenants.txt
+++ b/docs/generated/settings/settings-for-tenants.txt
@@ -12,8 +12,8 @@ bulkio.backup.read_with_priority_after duration 1m0s amount of time since the re
 bulkio.stream_ingestion.minimum_flush_interval duration 5s the minimum timestamp between flushes; flushes may still occur if internal buffers fill up tenant-rw
 changefeed.backfill.concurrent_scan_requests integer 0 number of concurrent scan requests per node issued during a backfill tenant-rw
 changefeed.backfill.scan_request_size integer 524288 the maximum number of bytes returned by each scan request tenant-rw
-changefeed.balance_range_distribution.enable boolean false if enabled, the ranges are balanced equally among all nodes tenant-rw
-changefeed.batch_reduction_retry_enabled boolean false if true, kafka changefeeds upon erroring on an oversized batch will attempt to resend the messages with progressively lower batch sizes tenant-rw
+changefeed.balance_range_distribution.enabled boolean false if enabled, the ranges are balanced equally among all nodes tenant-rw
+changefeed.batch_reduction_retry.enabled boolean false if true, kafka changefeeds upon erroring on an oversized batch will attempt to resend the messages with progressively lower batch sizes tenant-rw
 changefeed.event_consumer_worker_queue_size integer 16 if changefeed.event_consumer_workers is enabled, this setting sets the maxmimum number of events which a worker can buffer tenant-rw
 changefeed.event_consumer_workers integer 0 the number of workers to use when processing events: <0 disables, 0 assigns a reasonable default, >0 assigns the setting value. for experimental/core changefeeds and changefeeds using parquet format, this is disabled tenant-rw
 changefeed.fast_gzip.enabled boolean true use fast gzip implementation tenant-rw
@@ -58,7 +58,7 @@ server.auth_log.sql_sessions.enabled boolean false if set, log SQL session login
 server.authentication_cache.enabled boolean true enables a cache used during authentication to avoid lookups to system tables when retrieving per-user authentication-related information tenant-rw
 server.child_metrics.enabled boolean false enables the exporting of child metrics, additional prometheus time series with extra labels tenant-rw
 server.client_cert_expiration_cache.capacity integer 1000 the maximum number of client cert expirations stored tenant-rw
-server.clock.forward_jump_check_enabled boolean false if enabled, forward clock jumps > max_offset/2 will cause a panic tenant-rw
+server.clock.forward_jump_check.enabled boolean false if enabled, forward clock jumps > max_offset/2 will cause a panic tenant-rw
 server.clock.persist_upper_bound_interval duration 0s the interval between persisting the wall time upper bound of the clock. The clock does not generate a wall time greater than the persisted timestamp and will panic if it sees a wall time greater than this value. When cockroach starts, it waits for the wall time to catch-up till this persisted timestamp. This guarantees monotonic wall time across server restarts. Not setting this or setting a value of 0 disables this feature. tenant-rw
 server.eventlog.enabled boolean true if set, logged notable events are also stored in the table system.eventlog tenant-rw
 server.eventlog.ttl duration 2160h0m0s if nonzero, entries in system.eventlog older than this duration are periodically purged tenant-rw
@@ -69,7 +69,7 @@ server.identity_map.configuration string system-identity to database-username m
 server.log_gc.max_deletions_per_cycle integer 1000 the maximum number of entries to delete on each purge of log-like system tables tenant-rw
 server.log_gc.period duration 1h0m0s the period at which log-like system tables are checked for old entries tenant-rw
 server.max_connections_per_gateway integer -1 the maximum number of SQL connections per gateway allowed at a given time (note: this will only limit future connection attempts and will not affect already established connections). Negative values result in unlimited number of connections. Superusers are not affected by this limit. tenant-rw
-server.oidc_authentication.autologin boolean false if true, logged-out visitors to the DB Console will be automatically redirected to the OIDC login endpoint tenant-rw
+server.oidc_authentication.autologin.enabled boolean false if true, logged-out visitors to the DB Console will be automatically redirected to the OIDC login endpoint tenant-rw
 server.oidc_authentication.button_text string Log in with your OIDC provider text to show on button on DB Console login page to login with your OIDC provider (only shown if OIDC is enabled) tenant-rw
 server.oidc_authentication.claim_json_key string sets JSON key of principal to extract from payload after OIDC authentication completes (usually email or sid) tenant-rw
 server.oidc_authentication.client_id string sets OIDC client id tenant-rw
@@ -95,7 +95,7 @@ server.user_login.rehash_scram_stored_passwords_on_cost_change.enabled boolean t
 server.user_login.timeout duration 10s timeout after which client authentication times out if some system range is unavailable (0 = no timeout) tenant-rw
 server.user_login.upgrade_bcrypt_stored_passwords_to_scram.enabled boolean true if server.user_login.password_encryption=scram-sha-256, this controls whether to automatically re-encode stored passwords using crdb-bcrypt to scram-sha-256 tenant-rw
 server.web_session.purge.ttl duration 1h0m0s if nonzero, entries in system.web_sessions older than this duration are periodically purged tenant-rw
-server.web_session_timeout duration 168h0m0s the duration that a newly created web session will be valid tenant-rw
+server.web_session.timeout duration 168h0m0s the duration that a newly created web session will be valid tenant-rw
 sql.auth.change_own_password.enabled boolean false controls whether a user is allowed to change their own password, even if they have no other privileges tenant-rw
 sql.auth.public_schema_create_privilege.enabled boolean true determines whether to grant all users the CREATE privileges on the public schema when it is created tenant-rw
 sql.auth.resolve_membership_single_scan.enabled boolean true determines whether to populate the role membership cache with a single scan tenant-rw
@@ -250,7 +250,7 @@ sql.metrics.max_mem_reported_stmt_fingerprints integer 100000 the maximum number
 sql.metrics.max_mem_reported_txn_fingerprints integer 100000 the maximum number of reported transaction fingerprints stored in memory tenant-rw
 sql.metrics.max_mem_stmt_fingerprints integer 100000 the maximum number of statement fingerprints stored in memory tenant-rw
 sql.metrics.max_mem_txn_fingerprints integer 100000 the maximum number of transaction fingerprints stored in memory tenant-rw
-sql.metrics.statement_details.dump_to_logs boolean false dump collected statement statistics to node logs when periodically cleared tenant-rw
+sql.metrics.statement_details.dump_to_logs.enabled boolean false dump collected statement statistics to node logs when periodically cleared tenant-rw
 sql.metrics.statement_details.enabled boolean true collect per-statement query statistics tenant-rw
 sql.metrics.statement_details.gateway_node.enabled boolean true save the gateway node for each statement fingerprint. If false, the value will be stored as 0. tenant-rw
 sql.metrics.statement_details.index_recommendation_collection.enabled boolean true generate an index recommendation for each fingerprint ID tenant-rw
@@ -289,7 +289,7 @@ sql.telemetry.query_sampling.enabled boolean false when set to true, executed qu
 sql.telemetry.query_sampling.internal.enabled boolean false when set to true, internal queries will be sampled in telemetry logging tenant-rw
 sql.temp_object_cleaner.cleanup_interval duration 30m0s how often to clean up orphaned temporary objects tenant-rw
 sql.temp_object_cleaner.wait_interval duration 30m0s how long after creation a temporary object will be cleaned up tenant-rw
-sql.trace.log_statement_execute boolean false set to true to enable logging of executed statements tenant-rw
+sql.log.all_statements.enabled boolean false set to true to enable logging of all executed statements tenant-rw
 sql.trace.session_eventlog.enabled boolean false set to true to enable session tracing; note that enabling this may have a negative performance impact tenant-rw
 sql.trace.stmt.enable_threshold duration 0s enables tracing on all statements; statements executing for longer than this duration will have their trace logged (set to 0 to disable); note that enabling this may have a negative performance impact; this setting applies to individual statements within a transaction and is therefore finer-grained than sql.trace.txn.enable_threshold tenant-rw
 sql.trace.txn.enable_threshold duration 0s enables tracing on all transactions; transactions open for longer than this duration will have their trace logged (set to 0 to disable); note that enabling this may have a negative performance impact; this setting is coarser-grained than sql.trace.stmt.enable_threshold because it applies to all statements within a transaction as well as client communication (e.g. retries) tenant-rw
@@ -303,7 +303,7 @@ storage.max_sync_duration.fatal.enabled boolean true if true, fatal the process
 timeseries.storage.enabled boolean true if set, periodic timeseries data is stored within the cluster; disabling is not recommended unless you are storing the data elsewhere tenant-rw
 timeseries.storage.resolution_10s.ttl duration 240h0m0s the maximum age of time series data stored at the 10 second resolution. Data older than this is subject to rollup and deletion. tenant-rw
 timeseries.storage.resolution_30m.ttl duration 2160h0m0s the maximum age of time series data stored at the 30 minute resolution. Data older than this is subject to deletion. tenant-rw
-trace.debug.enable boolean false if set, traces for recent requests can be seen at https://<ui>/debug/requests tenant-rw
+trace.debug_http_endpoint.enabled boolean false if set, traces for recent requests can be seen at https://<ui>/debug/requests tenant-rw
 trace.jaeger.agent string the address of a Jaeger agent to receive traces using the Jaeger UDP Thrift protocol, as <host>:<port>. If no port is specified, 6381 will be used. tenant-rw
 trace.opentelemetry.collector string address of an OpenTelemetry trace collector to receive traces using the otel gRPC protocol, as <host>:<port>. If no port is specified, 4317 will be used. tenant-rw
 trace.snapshot.rate duration 0s if non-zero, interval at which background trace snapshots are captured tenant-rw

diff --git a/docs/generated/settings/settings.html b/docs/generated/settings/settings.html
index 9a1595c67e31..1533fcffa71a 100644
--- a/docs/generated/settings/settings.html
+++ b/docs/generated/settings/settings.html
@@ -18,8 +18,8 @@
 bulkio.stream_ingestion.minimum_flush_interval | duration | 5s | the minimum timestamp between flushes; flushes may still occur if internal buffers fill up | Serverless/Dedicated/Self-Hosted
 changefeed.backfill.concurrent_scan_requests | integer | 0 | number of concurrent scan requests per node issued during a backfill | Serverless/Dedicated/Self-Hosted
 changefeed.backfill.scan_request_size | integer | 524288 | the maximum number of bytes returned by each scan request | Serverless/Dedicated/Self-Hosted
-changefeed.balance_range_distribution.enable | boolean | false | if enabled, the ranges are balanced equally among all nodes | Serverless/Dedicated/Self-Hosted
-changefeed.batch_reduction_retry_enabled | boolean | false | if true, kafka changefeeds upon erroring on an oversized batch will attempt to resend the messages with progressively lower batch sizes | Serverless/Dedicated/Self-Hosted
+changefeed.balance_range_distribution.enabled | boolean | false | if enabled, the ranges are balanced equally among all nodes | Serverless/Dedicated/Self-Hosted
+changefeed.batch_reduction_retry.enabled | boolean | false | if true, kafka changefeeds upon erroring on an oversized batch will attempt to resend the messages with progressively lower batch sizes | Serverless/Dedicated/Self-Hosted
 changefeed.event_consumer_worker_queue_size | integer | 16 | if changefeed.event_consumer_workers is enabled, this setting sets the maxmimum number of events which a worker can buffer | Serverless/Dedicated/Self-Hosted
 changefeed.event_consumer_workers | integer | 0 | the number of workers to use when processing events: <0 disables, 0 assigns a reasonable default, >0 assigns the setting value. for experimental/core changefeeds and changefeeds using parquet format, this is disabled | Serverless/Dedicated/Self-Hosted
 changefeed.fast_gzip.enabled | boolean | true | use fast gzip implementation | Serverless/Dedicated/Self-Hosted
@@ -62,10 +62,10 @@
 kv.bulk_io_write.max_rate | byte size | 1.0 TiB | the rate limit (bytes/sec) to use for writes to disk on behalf of bulk io ops | Dedicated/Self-Hosted
 kv.bulk_sst.max_allowed_overage | byte size | 64 MiB | if positive, allowed size in excess of target size for SSTs from export requests; export requests (i.e. BACKUP) may buffer up to the sum of kv.bulk_sst.target_size and kv.bulk_sst.max_allowed_overage in memory | Dedicated/Self-Hosted
 kv.bulk_sst.target_size | byte size | 16 MiB | target size for SSTs emitted from export requests; export requests (i.e. BACKUP) may buffer up to the sum of kv.bulk_sst.target_size and kv.bulk_sst.max_allowed_overage in memory | Dedicated/Self-Hosted
-kv.closed_timestamp.follower_reads_enabled | boolean | true | allow (all) replicas to serve consistent historical reads based on closed timestamp information | Dedicated/Self-Hosted
+kv.closed_timestamp.follower_reads.enabled | boolean | true | allow (all) replicas to serve consistent historical reads based on closed timestamp information | Dedicated/Self-Hosted
 kv.log_range_and_node_events.enabled | boolean | true | set to true to transactionally log range events (e.g., split, merge, add/remove voter/non-voter) into system.rangelogand node join and restart events into system.eventolog | Dedicated/Self-Hosted
 kv.protectedts.reconciliation.interval | duration | 5m0s | the frequency for reconciling jobs with protected timestamp records | Serverless/Dedicated/Self-Hosted (read-only)
-kv.range_split.by_load_enabled | boolean | true | allow automatic splits of ranges based on where load is concentrated | Dedicated/Self-Hosted
+kv.range_split.by_load.enabled | boolean | true | allow automatic splits of ranges based on where load is concentrated | Dedicated/Self-Hosted
 kv.range_split.load_cpu_threshold | duration | 500ms | the CPU use per second over which, the range becomes a candidate for load based splitting | Dedicated/Self-Hosted
 kv.range_split.load_qps_threshold | integer | 2500 | the QPS over which, the range becomes a candidate for load based splitting | Dedicated/Self-Hosted
 kv.rangefeed.enabled | boolean | false | if set, rangefeed registration is enabled | Serverless/Dedicated/Self-Hosted
@@ -86,7 +86,7 @@
 server.authentication_cache.enabled | boolean | true | enables a cache used during authentication to avoid lookups to system tables when retrieving per-user authentication-related information | Serverless/Dedicated/Self-Hosted
 server.child_metrics.enabled | boolean | false | enables the exporting of child metrics, additional prometheus time series with extra labels | Serverless/Dedicated/Self-Hosted
 server.client_cert_expiration_cache.capacity | integer | 1000 | the maximum number of client cert expirations stored | Serverless/Dedicated/Self-Hosted
-server.clock.forward_jump_check_enabled | boolean | false | if enabled, forward clock jumps > max_offset/2 will cause a panic | Serverless/Dedicated/Self-Hosted
+server.clock.forward_jump_check.enabled | boolean | false | if enabled, forward clock jumps > max_offset/2 will cause a panic | Serverless/Dedicated/Self-Hosted
 server.clock.persist_upper_bound_interval | duration | 0s | the interval between persisting the wall time upper bound of the clock. The clock does not generate a wall time greater than the persisted timestamp and will panic if it sees a wall time greater than this value. When cockroach starts, it waits for the wall time to catch-up till this persisted timestamp. This guarantees monotonic wall time across server restarts. Not setting this or setting a value of 0 disables this feature. | Serverless/Dedicated/Self-Hosted
 server.consistency_check.max_rate | byte size | 8.0 MiB | the rate limit (bytes/sec) to use for consistency checks; used in conjunction with server.consistency_check.interval to control the frequency of consistency checks. Note that setting this too high can negatively impact performance. | Dedicated/Self-Hosted
 server.controller.default_tenant | string | system | name of the tenant to use to serve requests when clients don't specify a tenant | Dedicated/Self-Hosted
@@ -99,7 +99,7 @@
 server.log_gc.max_deletions_per_cycle | integer | 1000 | the maximum number of entries to delete on each purge of log-like system tables | Serverless/Dedicated/Self-Hosted
 server.log_gc.period | duration | 1h0m0s | the period at which log-like system tables are checked for old entries | Serverless/Dedicated/Self-Hosted
 server.max_connections_per_gateway | integer | -1 | the maximum number of SQL connections per gateway allowed at a given time (note: this will only limit future connection attempts and will not affect already established connections). Negative values result in unlimited number of connections. Superusers are not affected by this limit. | Serverless/Dedicated/Self-Hosted
-server.oidc_authentication.autologin | boolean | false | if true, logged-out visitors to the DB Console will be automatically redirected to the OIDC login endpoint | Serverless/Dedicated/Self-Hosted
+server.oidc_authentication.autologin.enabled | boolean | false | if true, logged-out visitors to the DB Console will be automatically redirected to the OIDC login endpoint | Serverless/Dedicated/Self-Hosted
 server.oidc_authentication.button_text | string | Log in with your OIDC provider | text to show on button on DB Console login page to login with your OIDC provider (only shown if OIDC is enabled) | Serverless/Dedicated/Self-Hosted
 server.oidc_authentication.claim_json_key | string | | sets JSON key of principal to extract from payload after OIDC authentication completes (usually email or sid) | Serverless/Dedicated/Self-Hosted
 server.oidc_authentication.client_id | string | | sets OIDC client id | Serverless/Dedicated/Self-Hosted
@@ -127,7 +127,7 @@
 server.user_login.timeout | duration | 10s | timeout after which client authentication times out if some system range is unavailable (0 = no timeout) | Serverless/Dedicated/Self-Hosted
 server.user_login.upgrade_bcrypt_stored_passwords_to_scram.enabled | boolean | true | if server.user_login.password_encryption=scram-sha-256, this controls whether to automatically re-encode stored passwords using crdb-bcrypt to scram-sha-256 | Serverless/Dedicated/Self-Hosted
 server.web_session.purge.ttl | duration | 1h0m0s | if nonzero, entries in system.web_sessions older than this duration are periodically purged | Serverless/Dedicated/Self-Hosted
-server.web_session_timeout | duration | 168h0m0s | the duration that a newly created web session will be valid | Serverless/Dedicated/Self-Hosted
+server.web_session.timeout | duration | 168h0m0s | the duration that a newly created web session will be valid | Serverless/Dedicated/Self-Hosted
 spanconfig.bounds.enabled | boolean | true | dictates whether span config bounds are consulted when serving span configs for secondary tenants | Dedicated/Self-Hosted
 spanconfig.storage_coalesce_adjacent.enabled | boolean | true | collapse adjacent ranges with the same span configs, for the ranges specific to the system tenant | Dedicated/Self-Hosted
 spanconfig.tenant_coalesce_adjacent.enabled | boolean | true | collapse adjacent ranges with the same span configs across all secondary tenant keyspaces | Dedicated/Self-Hosted
@@ -204,7 +204,7 @@
 sql.metrics.max_mem_reported_txn_fingerprints | integer | 100000 | the maximum number of reported transaction fingerprints stored in memory | Serverless/Dedicated/Self-Hosted
 sql.metrics.max_mem_stmt_fingerprints | integer | 100000 | the maximum number of statement fingerprints stored in memory | Serverless/Dedicated/Self-Hosted
 sql.metrics.max_mem_txn_fingerprints | integer | 100000 | the maximum number of transaction fingerprints stored in memory | Serverless/Dedicated/Self-Hosted
-sql.metrics.statement_details.dump_to_logs | boolean | false | dump collected statement statistics to node logs when periodically cleared | Serverless/Dedicated/Self-Hosted
+sql.metrics.statement_details.dump_to_logs.enabled | boolean | false | dump collected statement statistics to node logs when periodically cleared | Serverless/Dedicated/Self-Hosted
 sql.metrics.statement_details.enabled | boolean | true | collect per-statement query statistics | Serverless/Dedicated/Self-Hosted
 sql.metrics.statement_details.gateway_node.enabled | boolean | true | save the gateway node for each statement fingerprint. If false, the value will be stored as 0. | Serverless/Dedicated/Self-Hosted
 sql.metrics.statement_details.index_recommendation_collection.enabled | boolean | true | generate an index recommendation for each fingerprint ID | Serverless/Dedicated/Self-Hosted
@@ -243,7 +243,7 @@
 sql.telemetry.query_sampling.internal.enabled | boolean | false | when set to true, internal queries will be sampled in telemetry logging | Serverless/Dedicated/Self-Hosted
 sql.temp_object_cleaner.cleanup_interval | duration | 30m0s | how often to clean up orphaned temporary objects | Serverless/Dedicated/Self-Hosted
 sql.temp_object_cleaner.wait_interval | duration | 30m0s | how long after creation a temporary object will be cleaned up | Serverless/Dedicated/Self-Hosted
-sql.trace.log_statement_execute | boolean | false | set to true to enable logging of executed statements | Serverless/Dedicated/Self-Hosted
+sql.log.all_statements.enabled | boolean | false | set to true to enable logging of all executed statements | Serverless/Dedicated/Self-Hosted
 sql.trace.session_eventlog.enabled | boolean | false | set to true to enable session tracing; note that enabling this may have a negative performance impact | Serverless/Dedicated/Self-Hosted
 sql.trace.stmt.enable_threshold | duration | 0s | enables tracing on all statements; statements executing for longer than this duration will have their trace logged (set to 0 to disable); note that enabling this may have a negative performance impact; this setting applies to individual statements within a transaction and is therefore finer-grained than sql.trace.txn.enable_threshold | Serverless/Dedicated/Self-Hosted
 sql.trace.txn.enable_threshold | duration | 0s | enables tracing on all transactions; transactions open for longer than this duration will have their trace logged (set to 0 to disable); note that enabling this may have a negative performance impact; this setting is coarser-grained than sql.trace.stmt.enable_threshold because it applies to all statements within a transaction as well as client communication (e.g. retries) | Serverless/Dedicated/Self-Hosted
@@ -258,7 +258,7 @@
 timeseries.storage.enabled | boolean | true | if set, periodic timeseries data is stored within the cluster; disabling is not recommended unless you are storing the data elsewhere | Serverless/Dedicated/Self-Hosted
 timeseries.storage.resolution_10s.ttl | duration | 240h0m0s | the maximum age of time series data stored at the 10 second resolution. Data older than this is subject to rollup and deletion. | Serverless/Dedicated/Self-Hosted
 timeseries.storage.resolution_30m.ttl | duration | 2160h0m0s | the maximum age of time series data stored at the 30 minute resolution. Data older than this is subject to deletion. | Serverless/Dedicated/Self-Hosted
-trace.debug.enable | boolean | false | if set, traces for recent requests can be seen at https://<ui>/debug/requests | Serverless/Dedicated/Self-Hosted
+trace.debug_http_endpoint.enabled | boolean | false | if set, traces for recent requests can be seen at https://<ui>/debug/requests | Serverless/Dedicated/Self-Hosted
 trace.jaeger.agent | string | | the address of a Jaeger agent to receive traces using the Jaeger UDP Thrift protocol, as <host>:<port>. If no port is specified, 6381 will be used. | Serverless/Dedicated/Self-Hosted
 trace.opentelemetry.collector | string | | address of an OpenTelemetry trace collector to receive traces using the otel gRPC protocol, as <host>:<port>. If no port is specified, 4317 will be used. | Serverless/Dedicated/Self-Hosted
 trace.snapshot.rate | duration | 0s | if non-zero, interval at which background trace snapshots are captured | Serverless/Dedicated/Self-Hosted

diff --git a/pkg/bench/bench_test.go b/pkg/bench/bench_test.go
index e51bc84db62e..3174c9f74779 100644
--- a/pkg/bench/bench_test.go
+++ b/pkg/bench/bench_test.go
@@ -549,7 +549,7 @@ func BenchmarkTracing(b *testing.B) {
 		// more expensive.
 		sqlTraceRatio float64
 		// netTrace, if set, enables use of net.Traces. This is similar to
-		// the effects of the trace.debug.enable cluster setting.
+		// the effects of the trace.debug_http_endpoint.enabled cluster setting.
 		netTrace bool
 	}
 	for _, test := range []testSpec{

diff --git a/pkg/bench/tpcc/subprocess_commands_test.go b/pkg/bench/tpcc/subprocess_commands_test.go
index f7d542fc44e9..47629319737d 100644
--- a/pkg/bench/tpcc/subprocess_commands_test.go
+++ b/pkg/bench/tpcc/subprocess_commands_test.go
@@ -71,7 +71,7 @@ var (
 		db := tc.ServerConn(0)
 		tdb := sqlutils.MakeSQLRunner(db)
 		tdb.Exec(t, "CREATE DATABASE "+databaseName)
-		tdb.Exec(t, "SET CLUSTER SETTING kv.raft_log.disable_synchronization_unsafe = true")
+		tdb.Exec(t, "SET CLUSTER SETTING kv.raft_log.synchronization.disabled = true")
 		tdb.Exec(t, "USE "+databaseName)
 		tpcc, err := workload.Get("tpcc")
 		require.NoError(t, err)

diff --git a/pkg/bench/tpcc/tpcc_bench_test.go b/pkg/bench/tpcc/tpcc_bench_test.go
index b9ae51b1f6a8..0247704f030f 100644
--- a/pkg/bench/tpcc/tpcc_bench_test.go
+++ b/pkg/bench/tpcc/tpcc_bench_test.go
@@ -67,7 +67,7 @@ func BenchmarkTPCC(b *testing.B) {
 			}, cleanup
 		}),
 		setupStmt(`
-SET CLUSTER SETTING kv.raft_log.disable_synchronization_unsafe = true`),
+SET CLUSTER SETTING kv.raft_log.synchronization.disabled = true`),
 	}
 	for _, opts := range []options{

diff --git a/pkg/ccl/backupccl/backup_processor.go b/pkg/ccl/backupccl/backup_processor.go
index 91a7879938d9..4d98b3ba8784 100644
--- a/pkg/ccl/backupccl/backup_processor.go
+++ b/pkg/ccl/backupccl/backup_processor.go
@@ -85,6 +85,7 @@ var (
 		"bulkio.backup.split_keys_on_timestamps",
 		"split backup data on timestamps when writing revision history",
 		true,
+		settings.WithName("bulkio.backup.split_keys_on_timestamps.enabled"),
 	)
 
 	sendExportRequestWithVerboseTracing = settings.RegisterBoolSetting(
@@ -92,6 +93,7 @@ var (
 		"bulkio.backup.export_request_verbose_tracing",
 		"send each export request with a verbose tracing span",
 		util.ConstantWithMetamorphicTestBool("export_request_verbose_tracing", false),
+		settings.WithName("bulkio.backup.verbose_tracing.enabled"),
 	)
 
 	testingDiscardBackupData = envutil.EnvOrDefaultBool("COCKROACH_BACKUP_TESTING_DISCARD_DATA", false)

diff --git a/pkg/ccl/backupccl/backup_test.go b/pkg/ccl/backupccl/backup_test.go
index 6f654e0e9bc1..b1bb44f4b9d5 100644
--- a/pkg/ccl/backupccl/backup_test.go
+++ b/pkg/ccl/backupccl/backup_test.go
@@ -6988,7 +6988,7 @@ func TestBackupRestoreCreatedAndDroppedTenant(t *testing.T) {
 	systemDB.Exec(t, "DROP TENANT baz")
 
 	// Make GC job scheduled by DROP TENANT run in 1 second.
-	systemDB.Exec(t, "SET CLUSTER SETTING kv.range_merge.queue_enabled = false")
+	systemDB.Exec(t, "SET CLUSTER SETTING kv.range_merge.queue.enabled = false")
 	systemDB.Exec(t, "ALTER RANGE tenants CONFIGURE ZONE USING gc.ttlseconds = 1;")
 	// Wait for tenant GC job to complete.
 	systemDB.CheckQueryResultsRetry(
@@ -7187,7 +7187,7 @@ func TestBackupRestoreTenant(t *testing.T) {
 	)
 
 	// Make GC job scheduled by DROP TENANT run in 1 second.
-	restoreDB.Exec(t, "SET CLUSTER SETTING kv.range_merge.queue_enabled = false")
+	restoreDB.Exec(t, "SET CLUSTER SETTING kv.range_merge.queue.enabled = false")
 	restoreDB.Exec(t, "ALTER RANGE tenants CONFIGURE ZONE USING gc.ttlseconds = 1;")
 	// Wait for tenant GC job to complete.
 	restoreDB.CheckQueryResultsRetry(
@@ -11299,7 +11299,7 @@ func TestRestoreMemoryMonitoringWithShadowing(t *testing.T) {
 	defer cleanupFn()
 
 	sqlDB.Exec(t, "SET CLUSTER SETTING kv.bulk_io_write.restore_node_concurrency = 1")
-	sqlDB.Exec(t, "SET CLUSTER SETTING bulkio.restore.memory_monitor_ssts=true")
+	sqlDB.Exec(t, "SET CLUSTER SETTING bulkio.restore.sst_memory_limit.enabled=true")
 	sqlDB.Exec(t, "BACKUP data.bank INTO 'userfile:///backup'")
 
 	// Repeatedly alter a single row and do an incremental backup.
@@ -11340,7 +11340,7 @@ func TestRestoreMemoryMonitoringMinWorkerMemory(t *testing.T) {
 
 	// 4 restore workers means we need minimum 2 workers to start restore.
 	sqlDB.Exec(t, "SET CLUSTER SETTING kv.bulk_io_write.restore_node_concurrency=4")
-	sqlDB.Exec(t, "SET CLUSTER SETTING bulkio.restore.memory_monitor_ssts=true")
+	sqlDB.Exec(t, "SET CLUSTER SETTING bulkio.restore.sst_memory_limit.enabled=true")
 
 	sqlDB.Exec(t, "BACKUP data.bank INTO 'userfile:///backup'")

diff --git a/pkg/ccl/backupccl/backuprand/backup_rand_test.go b/pkg/ccl/backupccl/backuprand/backup_rand_test.go
index 9ceefea4920a..45a71af3e243 100644
--- a/pkg/ccl/backupccl/backuprand/backup_rand_test.go
+++ b/pkg/ccl/backupccl/backuprand/backup_rand_test.go
@@ -37,7 +37,7 @@ import (
 // It tests that full database backup as well as all subsets of per-table backup
 // roundtrip properly. 50% of the time, the test runs the restore with the
 // schema_only parameter, which does not restore any rows from user tables. The
-// test will also run with bulkio.restore.use_simple_import_spans set to true
+// test will also run with bulkio.restore.simple_import_spans.enabled set to true
 // 50% of the time.
 func TestBackupRestoreRandomDataRoundtrips(t *testing.T) {
 	defer leaktest.AfterTest(t)()
@@ -78,7 +78,7 @@ func TestBackupRestoreRandomDataRoundtrips(t *testing.T) {
 	}
 
 	if rng.Intn(2) == 0 {
-		sqlDB.Exec(t, "SET CLUSTER SETTING bulkio.restore.use_simple_import_spans = true")
+		sqlDB.Exec(t, "SET CLUSTER SETTING bulkio.restore.simple_import_spans.enabled = true")
 	}
 
 	tables := sqlDB.Query(t, `SELECT name FROM crdb_internal.tables WHERE

diff --git a/pkg/ccl/backupccl/restore_job.go b/pkg/ccl/backupccl/restore_job.go
index 1a895dedde99..f88abf00cd56 100644
--- a/pkg/ccl/backupccl/restore_job.go
+++ b/pkg/ccl/backupccl/restore_job.go
@@ -89,6 +89,7 @@ var useSimpleImportSpans = settings.RegisterBoolSetting(
 	"bulkio.restore.use_simple_import_spans",
 	"if set to true, restore will generate its import spans using the makeSimpleImportSpans algorithm",
 	false,
+	settings.WithName("bulkio.restore.simple_import_spans.enabled"),
 )
 
 var restoreStatsInsertionConcurrency = settings.RegisterIntSetting(

diff --git a/pkg/ccl/backupccl/restore_processor_planning.go b/pkg/ccl/backupccl/restore_processor_planning.go
index bb2fe14e6e15..ce0aebf4df47 100644
--- a/pkg/ccl/backupccl/restore_processor_planning.go
+++ b/pkg/ccl/backupccl/restore_processor_planning.go
@@ -53,8 +53,9 @@ var replanRestoreFrequency = settings.RegisterDurationSetting(
 var memoryMonitorSSTs = settings.RegisterBoolSetting(
 	settings.TenantWritable,
 	"bulkio.restore.memory_monitor_ssts",
-	"if true, restore will limit number of simultaneously open SSTs based on available memory",
+	"if true, restore will limit number of simultaneously open SSTs to keep memory usage under the configured memory fraction",
 	false,
+	settings.WithName("bulkio.restore.sst_memory_limit.enabled"),
 )
 
 // distRestore plans a 2 stage distSQL flow for a distributed restore. It

diff --git a/pkg/ccl/backupccl/utils_test.go b/pkg/ccl/backupccl/utils_test.go
index 509da8eeeb77..ccc998b0f73a 100644
--- a/pkg/ccl/backupccl/utils_test.go
+++ b/pkg/ccl/backupccl/utils_test.go
@@ -604,7 +604,7 @@ func runTestRestoreMemoryMonitoring(t *testing.T, numSplits, numInc, restoreProc
 	_, sqlDB, _, cleanupFn := backupRestoreTestSetupWithParams(t, singleNode, numAccounts, InitManualReplication, params)
 	defer cleanupFn()
 
-	sqlDB.Exec(t, "SET CLUSTER SETTING bulkio.restore.memory_monitor_ssts=true")
+	sqlDB.Exec(t, "SET CLUSTER SETTING bulkio.restore.sst_memory_limit.enabled=true")
 	sqlDB.Exec(t, "SET CLUSTER SETTING kv.bulk_io_write.restore_node_concurrency=2")
 
 	// Add some splits in the table, and set the target file size to be something

diff --git a/pkg/ccl/changefeedccl/alter_changefeed_test.go b/pkg/ccl/changefeedccl/alter_changefeed_test.go
index c1f10d82e212..687b21f07ea3 100644
--- a/pkg/ccl/changefeedccl/alter_changefeed_test.go
+++ b/pkg/ccl/changefeedccl/alter_changefeed_test.go
@@ -142,7 +142,7 @@ func TestAlterChangefeedAddTargetPrivileges(t *testing.T) {
 		})
 
 		// With require_external_connection_sink enabled, the user requires USAGE on the external connection.
-		rootDB.Exec(t, "SET CLUSTER SETTING changefeed.permissions.require_external_connection_sink = true")
+		rootDB.Exec(t, "SET CLUSTER SETTING changefeed.permissions.require_external_connection_sink.enabled = true")
 		withUser(t, "user1", func(userDB *sqlutils.SQLRunner) {
 			userDB.ExpectErr(t,
 				"user user1 does not have USAGE privilege on external_connection second",
@@ -155,7 +155,7 @@ func TestAlterChangefeedAddTargetPrivileges(t *testing.T) {
 				fmt.Sprintf("ALTER CHANGEFEED %d ADD table_b, table_c set sink='external://second'", jobID),
 			)
 		})
-		rootDB.Exec(t, "SET CLUSTER SETTING changefeed.permissions.require_external_connection_sink = false")
+		rootDB.Exec(t, "SET CLUSTER SETTING changefeed.permissions.require_external_connection_sink.enabled = false")
 	})
 
 	// TODO(#94757): remove CONTROLCHANGEFEED entirely

diff --git a/pkg/ccl/changefeedccl/cdctest/nemeses.go b/pkg/ccl/changefeedccl/cdctest/nemeses.go
index d44d624fa1f5..d5dcf769415c 100644
--- a/pkg/ccl/changefeedccl/cdctest/nemeses.go
+++ b/pkg/ccl/changefeedccl/cdctest/nemeses.go
@@ -130,7 +130,7 @@ func RunNemesis(
 	if _, err := db.Exec(`CREATE TABLE foo (id INT PRIMARY KEY, ts STRING DEFAULT '0')`); err != nil {
 		return nil, err
 	}
-	if _, err := db.Exec(`SET CLUSTER SETTING kv.range_merge.queue_enabled = false`); err != nil {
+	if _, err := db.Exec(`SET CLUSTER SETTING kv.range_merge.queue.enabled = false`); err != nil {
 		return nil, err
 	}
 	if _, err := db.Exec(`ALTER TABLE foo SPLIT AT VALUES ($1)`, ns.rowCount/2); err != nil {

diff --git a/pkg/ccl/changefeedccl/changefeed_dist.go b/pkg/ccl/changefeedccl/changefeed_dist.go
index 03b3423b5c1b..9de3de7abfa6 100644
--- a/pkg/ccl/changefeedccl/changefeed_dist.go
+++ b/pkg/ccl/changefeedccl/changefeed_dist.go
@@ -320,7 +320,8 @@ var enableBalancedRangeDistribution = settings.RegisterBoolSetting(
 	"changefeed.balance_range_distribution.enable",
 	"if enabled, the ranges are balanced equally among all nodes",
 	util.ConstantWithMetamorphicTestBool(
-		"changefeed.balance_range_distribution.enable", false),
+		"changefeed.balance_range_distribution.enabled", false),
+	settings.WithName("changefeed.balance_range_distribution.enabled"),
 	settings.WithPublic)
 
 func makePlan(

diff --git a/pkg/ccl/changefeedccl/changefeed_test.go b/pkg/ccl/changefeedccl/changefeed_test.go
index 08cdbc86a00e..9a98d09cccde 100644
--- a/pkg/ccl/changefeedccl/changefeed_test.go
+++ b/pkg/ccl/changefeedccl/changefeed_test.go
@@ -2936,7 +2936,7 @@ func TestChangefeedCreateAuthorizationWithChangefeedPriv(t *testing.T) {
 	})
 
 	// With require_external_connection_sink enabled, the user requires USAGE on the external connection.
-	rootDB.Exec(t, "SET CLUSTER SETTING changefeed.permissions.require_external_connection_sink = true")
+	rootDB.Exec(t, "SET CLUSTER SETTING changefeed.permissions.require_external_connection_sink.enabled = true")
 	withUser(t, "user1", func(userDB *sqlutils.SQLRunner) {
 		userDB.ExpectErr(t,
 			"pq: the CHANGEFEED privilege on all tables can only be used with external connection sinks",
@@ -2949,7 +2949,7 @@ func TestChangefeedCreateAuthorizationWithChangefeedPriv(t *testing.T) {
 			"CREATE CHANGEFEED for table_a, table_b INTO 'external://nope'",
 		)
 	})
-	rootDB.Exec(t, "SET CLUSTER SETTING changefeed.permissions.require_external_connection_sink = false")
+	rootDB.Exec(t, "SET CLUSTER SETTING changefeed.permissions.require_external_connection_sink.enabled = false")
 }
 
 func TestChangefeedGrant(t *testing.T) {

diff --git a/pkg/ccl/changefeedccl/changefeedbase/settings.go b/pkg/ccl/changefeedccl/changefeedbase/settings.go
index 2208888dd792..e5bc9aa63341 100644
--- a/pkg/ccl/changefeedccl/changefeedbase/settings.go
+++ b/pkg/ccl/changefeedccl/changefeedbase/settings.go
@@ -67,6 +67,7 @@ var IdleTimeout = settings.RegisterDurationSetting(
 	"a changefeed will mark itself idle if no changes have been emitted for greater than this duration; if 0, the changefeed will never be marked idle",
 	10*time.Minute,
 	settings.NonNegativeDuration,
+	settings.WithName("changefeed.auto_idle.timeout"),
 )
 
 // FrontierCheckpointFrequency controls the frequency of frontier checkpoints.
@@ -217,6 +218,7 @@ var BatchReductionRetryEnabled = settings.RegisterBoolSetting(
 	"changefeed.batch_reduction_retry_enabled",
 	"if true, kafka changefeeds upon erroring on an oversized batch will attempt to resend the messages with progressively lower batch sizes",
 	false,
+	settings.WithName("changefeed.batch_reduction_retry.enabled"),
 	settings.WithPublic)
 
 // UseMuxRangeFeed enables the use of MuxRangeFeed RPC.
@@ -280,6 +282,7 @@ var RequireExternalConnectionSink = settings.RegisterBoolSetting(
 		" to create changefeeds with external connection sinks only."+
 		" see https://www.cockroachlabs.com/docs/stable/create-external-connection.html",
 	false,
+	settings.WithName("changefeed.permissions.require_external_connection_sink.enabled"),
 )
 
 // SinkIOWorkers controls the number of IO workers used by sinks that use

diff --git a/pkg/ccl/changefeedccl/sink.go b/pkg/ccl/changefeedccl/sink.go
index 0d730a835f4a..c0bceca29333 100644
--- a/pkg/ccl/changefeedccl/sink.go
+++ b/pkg/ccl/changefeedccl/sink.go
@@ -163,7 +163,8 @@ var WebhookV2Enabled = settings.RegisterBoolSetting(
 	"changefeed.new_webhook_sink_enabled",
 	"if enabled, this setting enables a new implementation of the webhook sink"+
 		" that allows for a much higher throughput",
-	util.ConstantWithMetamorphicTestBool("changefeed.new_webhook_sink_enabled", false),
+	util.ConstantWithMetamorphicTestBool("changefeed.new_webhook_sink.enabled", false),
+	settings.WithName("changefeed.new_webhook_sink.enabled"),
 )
 
 // PubsubV2Enabled determines whether or not the refactored Webhook sink
@@ -173,7 +174,8 @@ var PubsubV2Enabled = settings.RegisterBoolSetting(
 	"changefeed.new_pubsub_sink_enabled",
 	"if enabled, this setting enables a new implementation of the pubsub sink"+
 		" that allows for a higher throughput",
-	util.ConstantWithMetamorphicTestBool("changefeed.new_pubsub_sink_enabled", false),
+	util.ConstantWithMetamorphicTestBool("changefeed.new_pubsub_sink.enabled", false),
+	settings.WithName("changefeed.new_pubsub_sink.enabled"),
 )
 
 func getSink(

diff --git a/pkg/ccl/oidcccl/settings.go b/pkg/ccl/oidcccl/settings.go
index 1f606c29188e..918e04bbba89 100644
--- a/pkg/ccl/oidcccl/settings.go
+++ b/pkg/ccl/oidcccl/settings.go
@@ -32,7 +32,7 @@ const (
 	OIDCClaimJSONKeySettingName   = baseOIDCSettingName + "claim_json_key"
 	OIDCPrincipalRegexSettingName = baseOIDCSettingName + "principal_regex"
 	OIDCButtonTextSettingName     = baseOIDCSettingName + "button_text"
-	OIDCAutoLoginSettingName      = baseOIDCSettingName + "autologin"
+	OIDCAutoLoginSettingName      = baseOIDCSettingName + "autologin.enabled"
 	OIDCGenerateClusterSSOTokenEnabledSettingName  = baseOIDCSettingName + "generate_cluster_sso_token.enabled"
 	OIDCGenerateClusterSSOTokenUseTokenSettingName = baseOIDCSettingName + "generate_cluster_sso_token.use_token"
@@ -259,10 +259,11 @@ var OIDCButtonText = settings.RegisterStringSetting(
 // the DB Console.
 var OIDCAutoLogin = settings.RegisterBoolSetting(
 	settings.TenantWritable,
-	OIDCAutoLoginSettingName,
+	"server.oidc_authentication.autologin",
 	"if true, logged-out visitors to the DB Console will be "+
 		"automatically redirected to the OIDC login endpoint",
 	false,
+	settings.WithName(OIDCAutoLoginSettingName),
 	settings.WithPublic,
 )

diff --git a/pkg/ccl/partitionccl/partition_test.go b/pkg/ccl/partitionccl/partition_test.go
index aeb566c4b253..8f0e8eb3ada8 100644
--- a/pkg/ccl/partitionccl/partition_test.go
+++ b/pkg/ccl/partitionccl/partition_test.go
@@ -1194,10 +1194,6 @@ func setupPartitioningTestCluster(
 	sqlDB := sqlutils.MakeSQLRunner(tc.Conns[0])
 	sqlDB.Exec(t, `CREATE DATABASE data`)
 
-	// Disabling store throttling vastly speeds up rebalancing.
-	sqlDB.Exec(t, `SET CLUSTER SETTING server.declined_reservation_timeout = '0s'`)
-	sqlDB.Exec(t, `SET CLUSTER SETTING server.failed_reservation_timeout = '0s'`)
-
 	return tc.Conns[0], sqlDB, func() {
 		tc.Stopper().Stop(context.Background())
 	}

diff --git a/pkg/ccl/serverccl/diagnosticsccl/reporter_test.go b/pkg/ccl/serverccl/diagnosticsccl/reporter_test.go
index b6d9ca720509..52ea238ee585 100644
--- a/pkg/ccl/serverccl/diagnosticsccl/reporter_test.go
+++ b/pkg/ccl/serverccl/diagnosticsccl/reporter_test.go
@@ -201,6 +201,7 @@ func TestServerReport(t *testing.T) {
 	require.Equal(t, expected, actual, "expected %d changed settings, got %d: %v", expected, actual, last.AlteredSettings)
 
 	for key, expected := range map[string]string{
+		// Note: this uses setting _keys_, not setting names.
 		"cluster.organization":                     "",
 		"diagnostics.reporting.send_crash_reports": "false",
 		"server.time_until_store_dead":             "1m30s",
@@ -446,7 +447,7 @@ func setupCluster(t *testing.T, db *gosql.DB) {
 	_, err = db.Exec(`SET CLUSTER SETTING diagnostics.reporting.enabled = true`)
 	require.NoError(t, err)
 
-	_, err = db.Exec(`SET CLUSTER SETTING diagnostics.reporting.send_crash_reports = false`)
+	_, err = db.Exec(`SET CLUSTER SETTING diagnostics.reporting.send_crash_reports.enabled = false`)
 	require.NoError(t, err)
 
 	_, err = db.Exec(fmt.Sprintf(`CREATE DATABASE %s`, elemName))

diff --git a/pkg/ccl/streamingccl/replicationtestutils/testutils.go b/pkg/ccl/streamingccl/replicationtestutils/testutils.go
index df1e4355dddb..4b565b278253 100644
--- a/pkg/ccl/streamingccl/replicationtestutils/testutils.go
+++ b/pkg/ccl/streamingccl/replicationtestutils/testutils.go
@@ -449,7 +449,7 @@ var defaultSrcClusterSetting = map[string]string{
 	`kv.rangefeed.enabled`:                `true`,
 	`kv.closed_timestamp.target_duration`: `'1s'`,
 	// Large timeout makes test to not fail with unexpected timeout failures.
-	`stream_replication.job_liveness_timeout`:            `'3m'`,
+	`stream_replication.job_liveness.timeout`:            `'3m'`,
 	`stream_replication.stream_liveness_track_frequency`: `'2s'`,
 	`stream_replication.min_checkpoint_frequency`:        `'1s'`,
 	// Make all AddSSTable operation to trigger AddSSTable events.

diff --git a/pkg/ccl/streamingccl/settings.go b/pkg/ccl/streamingccl/settings.go
index 1e91c9f9f010..01b627effa4c 100644
--- a/pkg/ccl/streamingccl/settings.go
+++ b/pkg/ccl/streamingccl/settings.go
@@ -39,6 +39,7 @@ var StreamReplicationJobLivenessTimeout = settings.RegisterDurationSetting(
 	"stream_replication.job_liveness_timeout",
 	"controls how long we wait for to kill an inactive producer job",
 	3*24*time.Hour,
+	settings.WithName("stream_replication.job_liveness.timeout"),
 )
 
 // StreamReplicationConsumerHeartbeatFrequency controls frequency the stream replication

diff --git a/pkg/ccl/streamingccl/streamclient/partitioned_stream_client_test.go b/pkg/ccl/streamingccl/streamclient/partitioned_stream_client_test.go
index 02d950d0e555..e6c16dce33b3 100644
--- a/pkg/ccl/streamingccl/streamclient/partitioned_stream_client_test.go
+++ b/pkg/ccl/streamingccl/streamclient/partitioned_stream_client_test.go
@@ -83,7 +83,7 @@ func TestPartitionedStreamReplicationClient(t *testing.T) {
 	ctx := context.Background()
 	// Makes sure source cluster producer job does not time out within test timeout
 	h.SysSQL.Exec(t, `
-SET CLUSTER SETTING stream_replication.job_liveness_timeout = '500s';
+SET CLUSTER SETTING stream_replication.job_liveness.timeout = '500s';
 `)
 	tenant.SQL.Exec(t, `
 CREATE DATABASE d;

diff --git a/pkg/ccl/streamingccl/streamingest/replication_stream_e2e_test.go b/pkg/ccl/streamingccl/streamingest/replication_stream_e2e_test.go
index 9a2e0ca0a04c..f5206db1fca2 100644
--- a/pkg/ccl/streamingccl/streamingest/replication_stream_e2e_test.go
+++ b/pkg/ccl/streamingccl/streamingest/replication_stream_e2e_test.go
@@ -47,7 +47,7 @@ func TestTenantStreamingProducerJobTimedOut(t *testing.T) {
 	ctx := context.Background()
 	args := replicationtestutils.DefaultTenantStreamingClustersArgs
-	args.SrcClusterSettings[`stream_replication.job_liveness_timeout`] = `'1m'`
+	args.SrcClusterSettings[`stream_replication.job_liveness.timeout`] = `'1m'`
 	c, cleanup := replicationtestutils.CreateTenantStreamingClusters(ctx, t, args)
 	defer cleanup()
@@ -67,7 +67,7 @@ func TestTenantStreamingProducerJobTimedOut(t *testing.T) {
 	// Make producer job easily times out
 	c.SrcSysSQL.ExecMultiple(t, replicationtestutils.ConfigureClusterSettings(map[string]string{
-		`stream_replication.job_liveness_timeout`: `'100ms'`,
+		`stream_replication.job_liveness.timeout`: `'100ms'`,
 	})...)
 
 	jobutils.WaitForJobToFail(c.T, c.SrcSysSQL, jobspb.JobID(producerJobID))

diff --git a/pkg/ccl/streamingccl/streamproducer/replication_stream_test.go b/pkg/ccl/streamingccl/streamproducer/replication_stream_test.go
index 9f214024ab47..1e8df412aaa3 100644
--- a/pkg/ccl/streamingccl/streamproducer/replication_stream_test.go
+++ b/pkg/ccl/streamingccl/streamproducer/replication_stream_test.go
@@ -256,7 +256,7 @@ func TestReplicationStreamInitialization(t *testing.T) {
 	defer cleanupTenant()
 
 	// Makes the stream time out really soon
-	h.SysSQL.Exec(t, "SET CLUSTER SETTING stream_replication.job_liveness_timeout = '10ms'")
+	h.SysSQL.Exec(t, "SET CLUSTER SETTING stream_replication.job_liveness.timeout = '10ms'")
 	h.SysSQL.Exec(t, "SET CLUSTER SETTING stream_replication.stream_liveness_track_frequency = '1ms'")
 	t.Run("failed-after-timeout", func(t *testing.T) {
 		replicationProducerSpec := h.StartReplicationStream(t, testTenantName)
@@ -268,7 +268,7 @@ func TestReplicationStreamInitialization(t *testing.T) {
 	})
 
 	// Make sure the stream does not time out within the test timeout
-	h.SysSQL.Exec(t, "SET CLUSTER SETTING stream_replication.job_liveness_timeout = '500s'")
+	h.SysSQL.Exec(t, "SET CLUSTER SETTING stream_replication.job_liveness.timeout = '500s'")
 	t.Run("continuously-running-within-timeout", func(t *testing.T) {
 		replicationProducerSpec := h.StartReplicationStream(t, testTenantName)
 		streamID := replicationProducerSpec.StreamID
@@ -600,7 +600,7 @@ func TestCompleteStreamReplication(t *testing.T) {
 	// Make the producer job times out fast and fastly tracks ingestion cutover signal.
 	h.SysSQL.ExecMultiple(t,
-		"SET CLUSTER SETTING stream_replication.job_liveness_timeout = '2s';",
+		"SET CLUSTER SETTING stream_replication.job_liveness.timeout = '2s';",
 		"SET CLUSTER SETTING stream_replication.stream_liveness_track_frequency = '2s';")
 
 	replicationProducerSpec := h.StartReplicationStream(t, testTenantName)
@@ -608,7 +608,7 @@ func TestCompleteStreamReplication(t *testing.T) {
 	jobutils.WaitForJobToFail(t, h.SysSQL, jobspb.JobID(timedOutStreamID))
 
 	// Makes the producer job not easily time out.
-	h.SysSQL.Exec(t, "SET CLUSTER SETTING stream_replication.job_liveness_timeout = '10m';")
+	h.SysSQL.Exec(t, "SET CLUSTER SETTING stream_replication.job_liveness.timeout = '10m';")
 	testCompleteStreamReplication := func(t *testing.T, successfulIngestion bool) {
 		// Verify no error when completing a timed out replication stream.
 		h.SysSQL.Exec(t, "SELECT crdb_internal.complete_replication_stream($1, $2)",

diff --git a/pkg/ccl/testccl/workload/schemachange/schema_change_external_test.go b/pkg/ccl/testccl/workload/schemachange/schema_change_external_test.go
index fd8095810d9d..e03f5d1a7b51 100644
--- a/pkg/ccl/testccl/workload/schemachange/schema_change_external_test.go
+++ b/pkg/ccl/testccl/workload/schemachange/schema_change_external_test.go
@@ -61,7 +61,7 @@ func TestWorkload(t *testing.T) {
 	tdb.Exec(t, "CREATE USER testuser")
 	tdb.Exec(t, "CREATE DATABASE schemachange")
 	tdb.Exec(t, "GRANT admin TO testuser")
-	tdb.Exec(t, "SET CLUSTER SETTING sql.trace.log_statement_execute = true")
+	tdb.Exec(t, "SET CLUSTER SETTING sql.log.all_statements.enabled = true")
 
 	// Grab a backup and also print the namespace and descriptor tables upon
 	// failure.

diff --git a/pkg/ccl/workloadccl/allccl/all_test.go b/pkg/ccl/workloadccl/allccl/all_test.go
index dd51ad02c84c..95402f1b5c22 100644
--- a/pkg/ccl/workloadccl/allccl/all_test.go
+++ b/pkg/ccl/workloadccl/allccl/all_test.go
@@ -158,7 +158,7 @@ func TestAllRegisteredSetup(t *testing.T) {
 	})
 	defer s.Stopper().Stop(ctx)
 	sqlutils.MakeSQLRunner(db).Exec(t, `CREATE DATABASE d`)
-	sqlutils.MakeSQLRunner(db).Exec(t, `SET CLUSTER SETTING kv.range_merge.queue_enabled = false`)
+	sqlutils.MakeSQLRunner(db).Exec(t, `SET CLUSTER SETTING kv.range_merge.queue.enabled = false`)
 
 	var l workloadsql.InsertsDataLoader
 	if _, err := workloadsql.Setup(ctx, db, gen, l); err != nil {

diff --git a/pkg/cli/interactive_tests/test_exec_log.tcl b/pkg/cli/interactive_tests/test_exec_log.tcl
index f1f0d01f28fa..05d8d3c2d9d8 100644
--- a/pkg/cli/interactive_tests/test_exec_log.tcl
+++ b/pkg/cli/interactive_tests/test_exec_log.tcl
@@ -14,7 +14,7 @@ system "if test -e $logfile; then false; fi"
 end_test
 
 start_test "Check that the exec log is created after enabled"
-send "SET CLUSTER SETTING sql.trace.log_statement_execute = TRUE;\r"
+send "SET CLUSTER SETTING sql.log.all_statements.enabled = TRUE;\r"
 eexpect "SET CLUSTER SETTING"
 eexpect root@
 system "test -e $logfile"
@@ -33,7 +33,7 @@ eexpect "does not exist"
 eexpect root@
 
 # Check logging after disable
-send "SET CLUSTER SETTING sql.trace.log_statement_execute = FALSE;\r"
+send "SET CLUSTER SETTING sql.log.all_statements.enabled = FALSE;\r"
 eexpect root@
 send "SELECT 'lov' || 'ely';\r"
 eexpect "lovely"
@@ -59,7 +59,7 @@ system "if grep -q lovely $logfile; then false; fi"
 end_test
 
 # Re-enable logging for the next test.
-send "SET CLUSTER SETTING sql.trace.log_statement_execute = TRUE;\r"
+send "SET CLUSTER SETTING sql.log.all_statements.enabled = TRUE;\r"
 eexpect "SET CLUSTER SETTING"
 eexpect root@

diff --git a/pkg/cli/testdata/explain-bundle/bundle/env.sql b/pkg/cli/testdata/explain-bundle/bundle/env.sql
index 37896e985e24..55bcf1e321ce 100644
--- a/pkg/cli/testdata/explain-bundle/bundle/env.sql
+++ b/pkg/cli/testdata/explain-bundle/bundle/env.sql
@@ -43,7 +43,7 @@
 -- diagnostics.forced_sql_stat_reset.interval = 2h0m0s (interval after which SQL statement statistics are refreshed even if not collected (should be more than diagnostics.sql_stat_reset.interval). It has a max value of 24H.)
 -- diagnostics.reporting.enabled = true (enable reporting diagnostic metrics to cockroach labs)
 -- diagnostics.reporting.interval = 1h0m0s (interval at which diagnostics data should be reported)
--- diagnostics.reporting.send_crash_reports = true (send crash and panic reports)
+-- diagnostics.reporting.send_crash_reports.enabled = true (send crash and panic reports)
 -- diagnostics.sql_stat_reset.interval = 1h0m0s (interval controlling how often SQL statement statistics should be reset (should be less than diagnostics.forced_sql_stat_reset.interval). It has a max value of 24H.)
 -- enterprise.license = (the encoded cluster license)
 -- external.graphite.endpoint = (if nonempty, push server metrics to the Graphite or Carbon server at the specified host:port)
@@ -90,7 +90,7 @@
 -- kv.bulk_sst.sync_size = 512 KiB (threshold after which non-Rocks SST writes must fsync (0 disables))
 -- kv.bulk_sst.target_size = 64 MiB (target size for SSTs emitted from export requests)
 -- kv.closed_timestamp.close_fraction = 0.2 (fraction of closed timestamp target duration specifying how frequently the closed timestamp is advanced)
--- kv.closed_timestamp.follower_reads_enabled = true (allow (all) replicas to serve consistent historical reads based on closed timestamp information)
+-- kv.closed_timestamp.follower_reads.enabled = true (allow (all) replicas to serve consistent historical reads based on closed timestamp information)
 -- kv.closed_timestamp.lead_for_global_reads_override = 0s (if nonzero, overrides the lead time that global_read ranges use to publish closed timestamps)
 -- kv.closed_timestamp.side_transport_interval = 200ms (the interval at which the closed-timestamp side-transport attempts to advance each range's closed timestamp; set to 0 to disable the side-transport)
 -- kv.closed_timestamp.target_duration = 3s (if nonzero, attempt to provide closed timestamp notifications for timestamps trailing cluster time by approximately this duration)
@@ -116,13 +116,13 @@
 -- kv.protectedts.reconciliation.interval = 5m0s (the frequency for reconciling jobs with protected timestamp records)
 -- kv.queue.process.guaranteed_time_budget = 1m0s (the guaranteed duration before which the processing of a queue may time out)
 -- kv.raft.command.max_size = 64 MiB (maximum size of a raft command)
--- kv.raft_log.disable_synchronization_unsafe = false (set to true to disable synchronization on Raft log writes to persistent storage. Setting to true risks data loss or data corruption on server crashes. The setting is meant for internal testing only and SHOULD NOT be used in production.)
+-- kv.raft_log.synchronization.disabled = false (set to true to disable synchronization on Raft log writes to persistent storage. Setting to true risks data loss or data corruption on server crashes. The setting is meant for internal testing only and SHOULD NOT be used in production.)
-- kv.range.backpressure_byte_tolerance = 32 MiB (defines the number of bytes above the product of backpressure_range_size_multiplier and the range_max_size at which backpressure will not apply) -- kv.range.backpressure_range_size_multiplier = 2 (multiple of range_max_bytes that a range is allowed to grow to without splitting before writes to that range are blocked, or 0 to disable) -- kv.range_descriptor_cache.size = 1000000 (maximum number of entries in the range descriptor cache) --- kv.range_merge.queue_enabled = true (whether the automatic merge queue is enabled) +-- kv.range_merge.queue.enabled = true (whether the automatic merge queue is enabled) -- kv.range_merge.queue_interval = 5s (how long the merge queue waits between processing replicas) --- kv.range_split.by_load_enabled = true (allow automatic splits of ranges based on where load is concentrated) +-- kv.range_split.by_load.enabled = true (allow automatic splits of ranges based on where load is concentrated) -- kv.range_split.by_load_merge_delay = 5m0s (the delay that range splits created due to load will wait before considering being merged away) -- kv.range_split.load_qps_threshold = 2500 (the QPS over which, the range becomes a candidate for load based splitting) -- kv.rangefeed.closed_timestamp_refresh_interval = 0s (the interval at which closed-timestamp updatesare delivered to rangefeeds; set to 0 to use kv.closed_timestamp.side_transport_interval) @@ -139,9 +139,9 @@ -- kv.tenant_rate_limiter.rate_limit = 200 (per-tenant rate limit in Request Units per second) -- kv.transaction.max_intents_bytes = 4194304 (maximum number of bytes used to track locks in transactions) -- kv.transaction.max_refresh_spans_bytes = 256000 (maximum number of bytes used to track refresh spans in serializable transactions) --- kv.transaction.parallel_commits_enabled = true (if enabled, transactional commits will be parallelized with transactional writes) +-- kv.transaction.parallel_commits.enabled = true (if enabled, transactional commits will be parallelized with transactional writes) -- kv.transaction.reject_over_max_intents_budget.enabled = false (if set, transactions that exceed their lock tracking budget (kv.transaction.max_intents_bytes) are rejected instead of having their lock spans imprecisely compressed) --- kv.transaction.write_pipelining_enabled = true (if enabled, transactional writes are pipelined through Raft consensus) +-- kv.transaction.write_pipelining.enabled = true (if enabled, transactional writes are pipelined through Raft consensus) -- kv.transaction.write_pipelining_max_batch_size = 128 (if non-zero, defines that maximum size batch that will be pipelined through Raft consensus) -- rocksdb.ingest_backpressure.l0_file_count_threshold = 20 (number of L0 files after which to backpressure SST ingestions) -- rocksdb.ingest_backpressure.max_delay = 5s (maximum amount of time to backpressure a single SST ingestion) @@ -157,21 +157,19 @@ -- server.auth_log.sql_sessions.enabled = false (if set, log SQL session login/disconnection events (note: may hinder performance on loaded nodes)) -- server.authentication_cache.enabled = true (enables a cache used during authentication to avoid lookups to system tables when retrieving per-user authentication-related information) -- server.child_metrics.enabled = false (enables the exporting of child metrics, additional prometheus time series with extra labels) --- server.clock.forward_jump_check_enabled = false (if enabled, forward clock jumps > max_offset/2 will cause a panic) +-- 
server.clock.forward_jump_check.enabled = false (if enabled, forward clock jumps > max_offset/2 will cause a panic) -- server.clock.persist_upper_bound_interval = 0s (the interval between persisting the wall time upper bound of the clock. The clock does not generate a wall time greater than the persisted timestamp and will panic if it sees a wall time greater than this value. When cockroach starts, it waits for the wall time to catch-up till this persisted timestamp. This guarantees monotonic wall time across server restarts. Not setting this or setting a value of 0 disables this feature.) -- server.consistency_check.interval = 24h0m0s (the time between range consistency checks; set to 0 to disable consistency checking. Note that intervals that are too short can negatively impact performance.) -- server.consistency_check.max_rate = 8.0 MiB (the rate limit (bytes/sec) to use for consistency checks; used in conjunction with server.consistency_check.interval to control the frequency of consistency checks. Note that setting this too high can negatively impact performance.) -- server.cpu_profile.total_dump_size_limit = 128 MiB (maximum combined disk size of preserved CPU profiles) --- server.declined_reservation_timeout = 1s (the amount of time to consider the store throttled for up-replication after a reservation was declined) -- server.eventlog.enabled = true (if set, logged notable events are also stored in the table system.eventlog) -- server.eventlog.ttl = 2160h0m0s (if nonzero, entries in system.eventlog older than this duration are deleted every 10m0s. Should not be lowered below 24 hours.) --- server.failed_reservation_timeout = 5s (the amount of time to consider the store throttled for up-replication after a failed reservation call) -- server.goroutine_dump.num_goroutines_threshold = 1000 (a threshold beyond which if number of goroutines increases, then goroutine dump can be triggered) -- server.goroutine_dump.total_dump_size_limit = 500 MiB (total size of goroutine dumps to be kept. Dumps are GC'ed in the order of creation time. The latest dump is always kept even if its size exceeds the limit.) -- server.host_based_authentication.configuration = (host-based authentication configuration to use during connection authentication) -- server.mem_profile.max_profiles = 5 (maximum number of profiles to be kept per ramp-up of memory usage. A ramp-up is defined as a sequence of profiles with increasing usage.) 
-- server.mem_profile.total_dump_size_limit = 128 MiB (maximum combined disk size of preserved memory profiles) --- server.oidc_authentication.autologin = false (if true, logged-out visitors to the DB Console will be automatically redirected to the OIDC login endpoint (this feature is experimental)) +-- server.oidc_authentication.autologin.enabled = false (if true, logged-out visitors to the DB Console will be automatically redirected to the OIDC login endpoint (this feature is experimental)) -- server.oidc_authentication.button_text = Login with your OIDC provider (text to show on button on DB Console login page to login with your OIDC provider (only shown if OIDC is enabled) (this feature is experimental)) -- server.oidc_authentication.claim_json_key = (sets JSON key of principal to extract from payload after OIDC authentication completes (usually email or sid) (this feature is experimental)) -- server.oidc_authentication.client_id = (sets OIDC client id (this feature is experimental)) @@ -240,7 +238,7 @@ -- sql.defaults.stub_catalog_tables.enabled = true (default value for stub_catalog_tables session setting) -- sql.defaults.vectorize = on (default vectorize mode [on = 0, on = 2, experimental_always = 3, off = 4]) -- sql.defaults.zigzag_join.enabled = true (default value for enable_zigzag_join session setting; allows use of zig-zag join by default) --- sql.distsql.flow_stream_timeout = 10s (amount of time incoming streams wait for a flow to be set up before erroring out) +-- sql.distsql.flow_stream.timeout = 10s (amount of time incoming streams wait for a flow to be set up before erroring out) -- sql.distsql.max_running_flows = 500 (maximum number of concurrent flows that can be run on a node) -- sql.distsql.temp_storage.hash_agg.enabled = true (set to false to disable hash aggregator disk spilling (this will improve performance, but the query might hit the memory limit)) -- sql.distsql.temp_storage.workmem = 64 MiB (maximum amount of memory in bytes a processor can use before falling back to temp storage) @@ -254,7 +252,7 @@ -- sql.metrics.max_mem_reported_txn_fingerprints = 100000 (the maximum number of reported transaction fingerprints stored in memory) -- sql.metrics.max_mem_stmt_fingerprints = 100000 (the maximum number of statement fingerprints stored in memory) -- sql.metrics.max_mem_txn_fingerprints = 100000 (the maximum number of transaction fingerprints stored in memory) --- sql.metrics.statement_details.dump_to_logs = false (dump collected statement statistics to node logs when periodically cleared) +-- sql.metrics.statement_details.dump_to_logs.enabled = false (dump collected statement statistics to node logs when periodically cleared) -- sql.metrics.statement_details.enabled = true (collect per-statement query statistics) -- sql.metrics.statement_details.plan_collection.enabled = true (periodically save a logical plan for each fingerprint) -- sql.metrics.statement_details.plan_collection.period = 5m0s (the time until a new logical plan is collected) @@ -280,7 +278,7 @@ -- sql.stmt_diagnostics.poll_interval = 10s (rate at which the stmtdiagnostics.Registry polls for requests, set to zero to disable) -- sql.tablecache.lease.refresh_limit = 500 (maximum number of descriptors to periodically refresh leases for) -- sql.temp_object_cleaner.cleanup_interval = 30m0s (how often to clean up orphaned temporary objects) --- sql.trace.log_statement_execute = false (set to true to enable logging of executed statements) +-- sql.log.all_statements.enabled = false (set to true to enable 
logging of executed statements) -- sql.trace.session_eventlog.enabled = false (set to true to enable session tracing. Note that enabling this may have a non-trivial negative performance impact.) -- sql.trace.stmt.enable_threshold = 0s (duration beyond which all statements are traced (set to 0 to disable). This applies to individual statements within a transaction and is therefore finer-grained than sql.trace.txn.enable_threshold.) -- sql.trace.txn.enable_threshold = 0s (duration beyond which all transactions are traced (set to 0 to disable). This setting is coarser grained thansql.trace.stmt.enable_threshold because it applies to all statements within a transaction as well as client communication (e.g. retries).) @@ -300,7 +298,7 @@ -- timeseries.storage.resolution_30m.ttl = 2160h0m0s (the maximum age of time series data stored at the 30 minute resolution. Data older than this is subject to deletion.) -- trace.datadog.agent = (if set, traces will be sent to this DataDog agent; use : or "default" for localhost:8126. Only one tracer can be configured at a time.) -- trace.datadog.project = CockroachDB (the project under which traces will be reported to the DataDog agent if trace.datadog.agent is set. Only one tracer can be configured at a time.) --- trace.debug.enable = false (if set, traces for recent requests can be seen at https:///debug/requests) +-- trace.debug_http_endpoint.enabled = false (if set, traces for recent requests can be seen at https:///debug/requests) -- trace.lightstep.token = (if set, traces go to Lightstep using this token) -- trace.zipkin.collector = (if set, traces go to the given Zipkin instance (example: '127.0.0.1:9411'). Only one tracer can be configured at a time.) -- version = 21.1-118 (set the active cluster version in the format '.') diff --git a/pkg/cloud/gcp/gcs_storage.go b/pkg/cloud/gcp/gcs_storage.go index c0b96acda84f..166a4711ced2 100644 --- a/pkg/cloud/gcp/gcs_storage.go +++ b/pkg/cloud/gcp/gcs_storage.go @@ -74,6 +74,7 @@ var gcsChunkRetryTimeout = settings.RegisterDurationSetting( "cloudstorage.gs.chunking.retry_timeout", "per-chunk retry deadline when chunking of file upload to Google Cloud Storage", 60*time.Second, + settings.WithName("cloudstorage.gs.chunking.per_chunk_retry.timeout"), ) func parseGSURL(_ cloud.ExternalStorageURIContext, uri *url.URL) (cloudpb.ExternalStorage, error) { diff --git a/pkg/cmd/allocsim/main.go b/pkg/cmd/allocsim/main.go index fb76f79809c4..9882fcfa70b8 100644 --- a/pkg/cmd/allocsim/main.go +++ b/pkg/cmd/allocsim/main.go @@ -541,7 +541,7 @@ func main() { c.Start(context.Background()) defer c.Close() c.UpdateZoneConfig(1, 1<<20) - _, err := c.Nodes[0].DB().Exec("SET CLUSTER SETTING kv.raft_log.disable_synchronization_unsafe = true") + _, err := c.Nodes[0].DB().Exec("SET CLUSTER SETTING kv.raft_log.synchronization.disabled = true") if err != nil { log.Fatalf(context.Background(), "%v", err) } diff --git a/pkg/cmd/bazci/githubpost/testdata/stress-failure.json b/pkg/cmd/bazci/githubpost/testdata/stress-failure.json index 75ec2d6803b6..cffbd135f909 100644 --- a/pkg/cmd/bazci/githubpost/testdata/stress-failure.json +++ b/pkg/cmd/bazci/githubpost/testdata/stress-failure.json @@ -15,7 +15,7 @@ {"Action":"run","Test":"TestReplicateQueueRebalance"} {"Action":"output","Test":"TestReplicateQueueRebalance","Output":"=== RUN TestReplicateQueueRebalance\n"} {"Action":"output","Test":"TestReplicateQueueRebalance","Output":"W180711 20:06:50.873091 39 server/status/runtime.go:143 Could not parse build timestamp: parsing time \"\" as 
\"2006/01/02 15:04:05\": cannot parse \"\" as \"2006\"\n"} -{"Action":"output","Test":"TestReplicateQueueRebalance","Output":"I180711 20:06:50.883719 39 server/server.go:794 [n?] monitoring forward clock jumps based on server.clock.forward_jump_check_enabled\n"} +{"Action":"output","Test":"TestReplicateQueueRebalance","Output":"I180711 20:06:50.883719 39 server/server.go:794 [n?] monitoring forward clock jumps based on server.clock.forward_jump_check.enabled\n"} {"Action":"output","Test":"TestReplicateQueueRebalance","Output":"I180711 20:06:50.887957 39 server/config.go:545 [n?] 1 storage engine initialized\n"} {"Action":"output","Test":"TestReplicateQueueRebalance","Output":"I180711 20:06:50.887979 39 server/config.go:548 [n?] RocksDB cache size: 128 MiB\n"} {"Action":"output","Test":"TestReplicateQueueRebalance","Output":"I180711 20:06:53.142470 3621 storage/replica.go:835 [replicaGC,n1,s1,r6/1:/{System/tse-Table/System…}] removed 8 (0+8) keys in 0ms [clear=0ms commit=0ms]\n"} diff --git a/pkg/cmd/bazci/githubpost/testdata/stress-fatal.json b/pkg/cmd/bazci/githubpost/testdata/stress-fatal.json index 01db956e1548..af1515ee7eac 100644 --- a/pkg/cmd/bazci/githubpost/testdata/stress-fatal.json +++ b/pkg/cmd/bazci/githubpost/testdata/stress-fatal.json @@ -6,7 +6,7 @@ {"Action":"run","Test":"TestGossipHandlesReplacedNode"} {"Action":"output","Test":"TestGossipHandlesReplacedNode","Output":"=== RUN TestGossipHandlesReplacedNode\n"} {"Action":"output","Test":"TestGossipHandlesReplacedNode","Output":"W180711 20:13:15.808212 83 server/status/runtime.go:143 Could not parse build timestamp: parsing time \"\" as \"2006/01/02 15:04:05\": cannot parse \"\" as \"2006\"\n"} -{"Action":"output","Test":"TestGossipHandlesReplacedNode","Output":"I180711 20:13:15.814446 83 server/server.go:794 [n?] monitoring forward clock jumps based on server.clock.forward_jump_check_enabled\n"} +{"Action":"output","Test":"TestGossipHandlesReplacedNode","Output":"I180711 20:13:15.814446 83 server/server.go:794 [n?] monitoring forward clock jumps based on server.clock.forward_jump_check.enabled\n"} {"Action":"output","Test":"TestGossipHandlesReplacedNode","Output":"I180711 20:13:15.818214 83 server/config.go:545 [n?] 1 storage engine initialized\n"} {"Action":"output","Test":"TestGossipHandlesReplacedNode","Output":"I180711 20:13:15.818267 83 server/config.go:548 [n?] RocksDB cache size: 128 MiB\n"} {"Action":"output","Test":"TestGossipHandlesReplacedNode","Output":"I180711 20:13:15.818307 83 server/config.go:548 [n?] store 0: in-memory, size 0 B\n"} diff --git a/pkg/cmd/github-pull-request-make/testdata/27595.diff b/pkg/cmd/github-pull-request-make/testdata/27595.diff index 2129f5086cf6..ca67e0fd56e6 100644 --- a/pkg/cmd/github-pull-request-make/testdata/27595.diff +++ b/pkg/cmd/github-pull-request-make/testdata/27595.diff @@ -4,7 +4,7 @@ index 90ebcf30ab1..46f33476c42 100644 +++ b/docs/generated/settings/settings.html @@ -37,6 +37,8 @@ rocksdb.min_wal_sync_intervalduration0sminimum duration between syncs of the RocksDB WAL - server.clock.forward_jump_check_enabledbooleanfalseif enabled, forward clock jumps > max_offset/2 will cause a panic. + server.clock.forward_jump_check.enabledbooleanfalseif enabled, forward clock jumps > max_offset/2 will cause a panic. server.clock.persist_upper_bound_intervalduration0sthe interval between persisting the wall time upper bound of the clock. The clock does not generate a wall time greater than the persisted timestamp and will panic if it sees a wall time greater than this value. 
When cockroach starts, it waits for the wall time to catch-up till this persisted timestamp. This guarantees monotonic wall time across server restarts. Not setting this or setting a value of 0 disables this feature. +server.closed_timestamp.close_fractionfloat0.2desc +server.closed_timestamp.target_durationduration5sif nonzero, attempt to provide closed timestamp notifications for timestamps trailing cluster time by approximately this duration diff --git a/pkg/kv/kvclient/kvcoord/txn_interceptor_committer.go b/pkg/kv/kvclient/kvcoord/txn_interceptor_committer.go index 572217d6b4b7..86f33f0c3a2b 100644 --- a/pkg/kv/kvclient/kvcoord/txn_interceptor_committer.go +++ b/pkg/kv/kvclient/kvcoord/txn_interceptor_committer.go @@ -30,6 +30,7 @@ var parallelCommitsEnabled = settings.RegisterBoolSetting( "kv.transaction.parallel_commits_enabled", "if enabled, transactional commits will be parallelized with transactional writes", true, + settings.WithName("kv.transaction.parallel_commits.enabled"), ) // txnCommitter is a txnInterceptor that concerns itself with committing and diff --git a/pkg/kv/kvclient/kvcoord/txn_interceptor_pipeliner.go b/pkg/kv/kvclient/kvcoord/txn_interceptor_pipeliner.go index 52d222423a18..4e0ae3f9a150 100644 --- a/pkg/kv/kvclient/kvcoord/txn_interceptor_pipeliner.go +++ b/pkg/kv/kvclient/kvcoord/txn_interceptor_pipeliner.go @@ -31,12 +31,13 @@ import ( // The degree of the inFlightWrites btree. const txnPipelinerBtreeDegree = 32 -// PipelinedWritesEnabled is the kv.transaction.write_pipelining_enabled cluster setting. +// PipelinedWritesEnabled is the kv.transaction.write_pipelining.enabled cluster setting. var PipelinedWritesEnabled = settings.RegisterBoolSetting( settings.TenantWritable, "kv.transaction.write_pipelining_enabled", "if enabled, transactional writes are pipelined through Raft consensus", true, + settings.WithName("kv.transaction.write_pipelining.enabled"), ) var pipelinedWritesMaxBatchSize = settings.RegisterIntSetting( settings.TenantWritable, @@ -52,6 +53,7 @@ var pipelinedWritesMaxBatchSize = settings.RegisterIntSetting( // hit the 1PC fast-path or should have batches which exceed this limit. 128, settings.NonNegativeInt, + settings.WithName("kv.transaction.write_pipelining.max_batch_size"), ) // TrackedWritesMaxSize is a byte threshold for the tracking of writes performed diff --git a/pkg/kv/kvclient/kvcoord/txn_interceptor_pipeliner_test.go b/pkg/kv/kvclient/kvcoord/txn_interceptor_pipeliner_test.go index 55cad957943e..e168bb3a553c 100644 --- a/pkg/kv/kvclient/kvcoord/txn_interceptor_pipeliner_test.go +++ b/pkg/kv/kvclient/kvcoord/txn_interceptor_pipeliner_test.go @@ -1256,7 +1256,7 @@ func TestTxnPipelinerMaxInFlightSize(t *testing.T) { } // TestTxnPipelinerMaxBatchSize tests that batches that contain more requests -// than allowed by the kv.transaction.write_pipelining_max_batch_size setting +// than allowed by the kv.transaction.write_pipelining.max_batch_size setting // will not be pipelined. func TestTxnPipelinerMaxBatchSize(t *testing.T) { defer leaktest.AfterTest(t)() diff --git a/pkg/kv/kvnemesis/kvnemesis_test.go b/pkg/kv/kvnemesis/kvnemesis_test.go index 0f7824bc70b4..3b5e7aeef5c5 100644 --- a/pkg/kv/kvnemesis/kvnemesis_test.go +++ b/pkg/kv/kvnemesis/kvnemesis_test.go @@ -295,7 +295,7 @@ func testKVNemesisImpl(t *testing.T, cfg kvnemesisTestCfg) { sqlutils.MakeSQLRunner(sqlDBs[0]).Exec(t, `SET CLUSTER SETTING kv.rangefeed.enabled = true`) // Turn net/trace on, which results in real trace spans created throughout. 
// This gives kvnemesis a chance to hit NPEs related to tracing. - sqlutils.MakeSQLRunner(sqlDBs[0]).Exec(t, `SET CLUSTER SETTING trace.debug.enable = true`) + sqlutils.MakeSQLRunner(sqlDBs[0]).Exec(t, `SET CLUSTER SETTING trace.debug_http_endpoint.enabled = true`) config := NewDefaultConfig() config.NumNodes = cfg.numNodes diff --git a/pkg/kv/kvserver/allocator/storepool/store_pool.go b/pkg/kv/kvserver/allocator/storepool/store_pool.go index 6efa6caded65..66c9357d6f66 100644 --- a/pkg/kv/kvserver/allocator/storepool/store_pool.go +++ b/pkg/kv/kvserver/allocator/storepool/store_pool.go @@ -42,6 +42,7 @@ var FailedReservationsTimeout = settings.RegisterDurationSetting( "the amount of time to consider the store throttled for up-replication after a failed reservation call", 5*time.Second, settings.NonNegativeDuration, + settings.WithName("server.failed_reservation.timeout"), ) // The NodeCountFunc returns a count of the total number of nodes the user diff --git a/pkg/kv/kvserver/client_merge_test.go b/pkg/kv/kvserver/client_merge_test.go index 7eb38d7d6d9f..dc5f76fe1db6 100644 --- a/pkg/kv/kvserver/client_merge_test.go +++ b/pkg/kv/kvserver/client_merge_test.go @@ -4262,7 +4262,7 @@ func TestMergeQueue(t *testing.T) { store := tc.GetFirstStoreFromServer(t, 0) // The cluster with manual replication disables the merge queue, // so we need to re-enable. - _, err := tc.ServerConn(0).Exec(`SET CLUSTER SETTING kv.range_merge.queue_enabled = true`) + _, err := tc.ServerConn(0).Exec(`SET CLUSTER SETTING kv.range_merge.queue.enabled = true`) require.NoError(t, err) store.SetMergeQueueActive(true) @@ -4837,7 +4837,7 @@ func TestMergeQueueWithSlowNonVoterSnaps(t *testing.T) { defer tc.Stopper().Stop(ctx) // We're controlling merge queue operation via // `store.SetMergeQueueActive`, so enable the cluster setting here. - _, err := tc.ServerConn(0).Exec(`SET CLUSTER SETTING kv.range_merge.queue_enabled=true`) + _, err := tc.ServerConn(0).Exec(`SET CLUSTER SETTING kv.range_merge.queue.enabled=true`) require.NoError(t, err) store, err := tc.Server(0).GetStores().(*kvserver.Stores).GetStore(1) diff --git a/pkg/kv/kvserver/client_replica_test.go b/pkg/kv/kvserver/client_replica_test.go index 322f80772956..43778824a2e2 100644 --- a/pkg/kv/kvserver/client_replica_test.go +++ b/pkg/kv/kvserver/client_replica_test.go @@ -4262,7 +4262,7 @@ func TestStrictGCEnforcement(t *testing.T) { // Disable follower reads. When metamorphically enabling expiration-based // leases, an expired lease will cause a follower read which bypasses the // strict GC enforcement. - sqlDB.Exec(t, "SET CLUSTER SETTING kv.closed_timestamp.follower_reads_enabled = false") + sqlDB.Exec(t, "SET CLUSTER SETTING kv.closed_timestamp.follower_reads.enabled = false") sqlDB.Exec(t, "SET CLUSTER SETTING kv.closed_timestamp.target_duration = '10 ms'") defer sqlDB.Exec(t, `SET CLUSTER SETTING kv.gc_ttl.strict_enforcement.enabled = DEFAULT`) diff --git a/pkg/kv/kvserver/closed_timestamp_test.go b/pkg/kv/kvserver/closed_timestamp_test.go index c06cf6ef70f9..91bbceca466c 100644 --- a/pkg/kv/kvserver/closed_timestamp_test.go +++ b/pkg/kv/kvserver/closed_timestamp_test.go @@ -387,7 +387,7 @@ func TestClosedTimestampCanServeAfterSplitAndMerges(t *testing.T) { tc, db0, desc := setupClusterForClosedTSTesting(ctx, t, testingTargetDuration, 0, cArgs, "cttest", "kv") repls := replsForRange(ctx, t, tc, desc) // Disable the automatic merging. 
- if _, err := db0.Exec("SET CLUSTER SETTING kv.range_merge.queue_enabled = false"); err != nil { + if _, err := db0.Exec("SET CLUSTER SETTING kv.range_merge.queue.enabled = false"); err != nil { t.Fatal(err) } @@ -740,7 +740,7 @@ func TestClosedTimestampFrozenAfterSubsumption(t *testing.T) { sqlDB.ExecMultiple(t, strings.Split(fmt.Sprintf(` SET CLUSTER SETTING kv.closed_timestamp.target_duration = '%s'; SET CLUSTER SETTING kv.closed_timestamp.side_transport_interval = '%s'; -SET CLUSTER SETTING kv.closed_timestamp.follower_reads_enabled = true; +SET CLUSTER SETTING kv.closed_timestamp.follower_reads.enabled = true; `, 5*time.Second, 100*time.Millisecond), ";")...) leftDesc, rightDesc := splitDummyRangeInTestCluster(t, tc, "cttest", "kv", hlc.Timestamp{} /* splitExpirationTime */) @@ -1226,7 +1226,7 @@ func setupClusterForClosedTSTesting( sqlRunner.ExecMultiple(t, strings.Split(fmt.Sprintf(` SET CLUSTER SETTING kv.closed_timestamp.target_duration = '%s'; SET CLUSTER SETTING kv.closed_timestamp.side_transport_interval = '%s'; -SET CLUSTER SETTING kv.closed_timestamp.follower_reads_enabled = true; +SET CLUSTER SETTING kv.closed_timestamp.follower_reads.enabled = true; SET CLUSTER SETTING kv.allocator.load_based_rebalancing = 'off'; `, targetDuration, sideTransportInterval), ";")...) diff --git a/pkg/kv/kvserver/kvserverbase/base.go b/pkg/kv/kvserver/kvserverbase/base.go index 131105fc2e1a..88ff413b10f2 100644 --- a/pkg/kv/kvserver/kvserverbase/base.go +++ b/pkg/kv/kvserver/kvserverbase/base.go @@ -32,6 +32,7 @@ var MergeQueueEnabled = settings.RegisterBoolSetting( "kv.range_merge.queue_enabled", "whether the automatic merge queue is enabled", true, + settings.WithName("kv.range_merge.queue.enabled"), ) // ReplicateQueueEnabled is a setting that controls whether the replicate queue diff --git a/pkg/kv/kvserver/logstore/logstore.go b/pkg/kv/kvserver/logstore/logstore.go index 40dc17ee17fd..d3ca200feb0c 100644 --- a/pkg/kv/kvserver/logstore/logstore.go +++ b/pkg/kv/kvserver/logstore/logstore.go @@ -46,6 +46,7 @@ var disableSyncRaftLog = settings.RegisterBoolSetting( "This not only disables fsync, but also disables flushing writes to the OS buffer. 
"+ "The setting is meant for internal testing only and SHOULD NOT be used in production.", envutil.EnvOrDefaultBool("COCKROACH_DISABLE_RAFT_LOG_SYNCHRONIZATION_UNSAFE", false), + settings.WithName("kv.raft_log.synchronization.disabled"), ) var enableNonBlockingRaftLogSync = settings.RegisterBoolSetting( diff --git a/pkg/kv/kvserver/replica_follower_read.go b/pkg/kv/kvserver/replica_follower_read.go index 0e9db1d7d537..6aae45e780f3 100644 --- a/pkg/kv/kvserver/replica_follower_read.go +++ b/pkg/kv/kvserver/replica_follower_read.go @@ -31,6 +31,7 @@ var FollowerReadsEnabled = settings.RegisterBoolSetting( "kv.closed_timestamp.follower_reads_enabled", "allow (all) replicas to serve consistent historical reads based on closed timestamp information", true, + settings.WithName("kv.closed_timestamp.follower_reads.enabled"), settings.WithPublic) // BatchCanBeEvaluatedOnFollower determines if a batch consists exclusively of diff --git a/pkg/kv/kvserver/replica_learner_test.go b/pkg/kv/kvserver/replica_learner_test.go index 7ba6ab2c48ee..8c0c27b0a842 100644 --- a/pkg/kv/kvserver/replica_learner_test.go +++ b/pkg/kv/kvserver/replica_learner_test.go @@ -1651,7 +1651,7 @@ func TestLearnerAndVoterOutgoingFollowerRead(t *testing.T) { db.Exec(t, fmt.Sprintf(`SET CLUSTER SETTING kv.closed_timestamp.target_duration = '%s'`, testingTargetDuration)) db.Exec(t, fmt.Sprintf(`SET CLUSTER SETTING kv.closed_timestamp.side_transport_interval = '%s'`, testingSideTransportInterval)) - db.Exec(t, `SET CLUSTER SETTING kv.closed_timestamp.follower_reads_enabled = true`) + db.Exec(t, `SET CLUSTER SETTING kv.closed_timestamp.follower_reads.enabled = true`) scratchStartKey := tc.ScratchRange(t) var scratchDesc roachpb.RangeDescriptor @@ -2039,7 +2039,7 @@ func TestMergeQueueDoesNotInterruptReplicationChange(t *testing.T) { db := sqlutils.MakeSQLRunner(tc.ServerConn(0)) // TestCluster currently overrides this when used with ReplicationManual. - db.Exec(t, `SET CLUSTER SETTING kv.range_merge.queue_enabled = true`) + db.Exec(t, `SET CLUSTER SETTING kv.range_merge.queue.enabled = true`) // While this replication change is stalled, we'll trigger a merge and // ensure that the merge correctly notices that there is a snapshot in @@ -2068,7 +2068,7 @@ func TestMergeQueueSeesLearnerOrJointConfig(t *testing.T) { defer tc.Stopper().Stop(ctx) db := sqlutils.MakeSQLRunner(tc.ServerConn(0)) // TestCluster currently overrides this when used with ReplicationManual. - db.Exec(t, `SET CLUSTER SETTING kv.range_merge.queue_enabled = true`) + db.Exec(t, `SET CLUSTER SETTING kv.range_merge.queue.enabled = true`) scratchStartKey := tc.ScratchRange(t) origDesc := tc.LookupRangeOrFatal(t, scratchStartKey) diff --git a/pkg/kv/kvserver/replica_rankings_test.go b/pkg/kv/kvserver/replica_rankings_test.go index b30898da4f17..1fa7f64d3e17 100644 --- a/pkg/kv/kvserver/replica_rankings_test.go +++ b/pkg/kv/kvserver/replica_rankings_test.go @@ -256,7 +256,7 @@ func TestWriteLoadStatsAccounting(t *testing.T) { // Disable the consistency checker, to avoid interleaving requests // artificially inflating measurement due to consistency checking. 
sqlDB.Exec(t, `SET CLUSTER SETTING server.consistency_check.interval = '0'`) - sqlDB.Exec(t, `SET CLUSTER SETTING kv.range_split.by_load_enabled = false`) + sqlDB.Exec(t, `SET CLUSTER SETTING kv.range_split.by_load.enabled = false`) for _, testCase := range testCases { // This test can flake, where an errant request - not sent here @@ -387,7 +387,7 @@ func TestReadLoadMetricAccounting(t *testing.T) { // Disable the consistency checker, to avoid interleaving requests // artificially inflating measurement due to consistency checking. sqlDB.Exec(t, `SET CLUSTER SETTING server.consistency_check.interval = '0'`) - sqlDB.Exec(t, `SET CLUSTER SETTING kv.range_split.by_load_enabled = false`) + sqlDB.Exec(t, `SET CLUSTER SETTING kv.range_split.by_load.enabled = false`) for _, testCase := range testCases { // This test can flake, where an errant request - not sent here diff --git a/pkg/kv/kvserver/replica_split_load.go b/pkg/kv/kvserver/replica_split_load.go index f13e206f26a6..4ed454c13a77 100644 --- a/pkg/kv/kvserver/replica_split_load.go +++ b/pkg/kv/kvserver/replica_split_load.go @@ -26,12 +26,13 @@ import ( "github.com/cockroachdb/errors" ) -// SplitByLoadEnabled wraps "kv.range_split.by_load_enabled". +// SplitByLoadEnabled wraps "kv.range_split.by_load.enabled". var SplitByLoadEnabled = settings.RegisterBoolSetting( settings.SystemOnly, "kv.range_split.by_load_enabled", "allow automatic splits of ranges based on where load is concentrated", true, + settings.WithName("kv.range_split.by_load.enabled"), settings.WithPublic) // SplitByLoadQPSThreshold wraps "kv.range_split.load_qps_threshold". diff --git a/pkg/server/authserver/authentication.go b/pkg/server/authserver/authentication.go index f1ef750358aa..28eef76395b3 100644 --- a/pkg/server/authserver/authentication.go +++ b/pkg/server/authserver/authentication.go @@ -100,6 +100,7 @@ var WebSessionTimeout = settings.RegisterDurationSetting( "the duration that a newly created web session will be valid", 7*24*time.Hour, settings.NonNegativeDuration, + settings.WithName("server.web_session.timeout"), settings.WithPublic) type authenticationServer struct { diff --git a/pkg/server/clock_monotonicity.go b/pkg/server/clock_monotonicity.go index d1a72a8c1e0a..6b3fec3c8125 100644 --- a/pkg/server/clock_monotonicity.go +++ b/pkg/server/clock_monotonicity.go @@ -28,6 +28,7 @@ var ( "server.clock.forward_jump_check_enabled", "if enabled, forward clock jumps > max_offset/2 will cause a panic", false, + settings.WithName("server.clock.forward_jump_check.enabled"), settings.WithPublic) persistHLCUpperBoundInterval = settings.RegisterDurationSetting( @@ -62,7 +63,7 @@ func (s *topLevelServer) startMonitoringForwardClockJumps(ctx context.Context) e return errors.Wrap(err, "monitoring forward clock jumps") } - log.Ops.Info(ctx, "monitoring forward clock jumps based on server.clock.forward_jump_check_enabled") + log.Ops.Info(ctx, "monitoring forward clock jumps based on server.clock.forward_jump_check.enabled") return nil } diff --git a/pkg/server/import_ts.go b/pkg/server/import_ts.go index 03b8d971a99a..37c55151096e 100644 --- a/pkg/server/import_ts.go +++ b/pkg/server/import_ts.go @@ -75,7 +75,7 @@ func maybeImportTS(ctx context.Context, s *topLevelServer) (returnErr error) { // Disable writing of new timeseries, as well as roll-ups and deletion. 
for _, stmt := range []string{ - "SET CLUSTER SETTING kv.raft_log.disable_synchronization_unsafe = 'true';", + "SET CLUSTER SETTING kv.raft_log.synchronization.disabled = 'true';", "SET CLUSTER SETTING timeseries.storage.enabled = 'false';", "SET CLUSTER SETTING timeseries.storage.resolution_10s.ttl = '99999h';", "SET CLUSTER SETTING timeseries.storage.resolution_30m.ttl = '99999h';", diff --git a/pkg/sql/copy_test.go b/pkg/sql/copy_test.go index ef943a2ca710..b420a83e073e 100644 --- a/pkg/sql/copy_test.go +++ b/pkg/sql/copy_test.go @@ -37,7 +37,7 @@ func TestCopyLogging(t *testing.T) { ctx := context.Background() for _, strings := range [][]string{ - {`SET CLUSTER SETTING sql.trace.log_statement_execute = true`}, + {`SET CLUSTER SETTING sql.log.all_statements.enabled = true`}, {`SET CLUSTER SETTING sql.telemetry.query_sampling.enabled = true`}, {`SET CLUSTER SETTING sql.log.admin_audit.enabled = true`}, } { diff --git a/pkg/sql/exec_log.go b/pkg/sql/exec_log.go index 2695695ddfd6..40dea3b2783b 100644 --- a/pkg/sql/exec_log.go +++ b/pkg/sql/exec_log.go @@ -49,8 +49,9 @@ import ( var logStatementsExecuteEnabled = settings.RegisterBoolSetting( settings.TenantWritable, "sql.trace.log_statement_execute", - "set to true to enable logging of executed statements", + "set to true to enable logging of all executed statements", false, + settings.WithName("sql.log.all_statements.enabled"), settings.WithPublic) var slowQueryLogThreshold = settings.RegisterDurationSettingWithExplicitUnit( diff --git a/pkg/sql/flowinfra/flow_registry.go b/pkg/sql/flowinfra/flow_registry.go index 920ad3fe649f..224fe3a8baca 100644 --- a/pkg/sql/flowinfra/flow_registry.go +++ b/pkg/sql/flowinfra/flow_registry.go @@ -45,6 +45,7 @@ var SettingFlowStreamTimeout = settings.RegisterDurationSetting( "amount of time incoming streams wait for a flow to be set up before erroring out", 10*time.Second, settings.NonNegativeDuration, + settings.WithName("sql.distsql.flow_stream.timeout"), ) // expectedConnectionTime is the expected time taken by a flow to connect to its diff --git a/pkg/sql/logictest/testdata/logic_test/cluster_settings b/pkg/sql/logictest/testdata/logic_test/cluster_settings index bbeab076ab44..b203581ee475 100644 --- a/pkg/sql/logictest/testdata/logic_test/cluster_settings +++ b/pkg/sql/logictest/testdata/logic_test/cluster_settings @@ -427,3 +427,27 @@ SHOW CLUSTER SETTING sql.defaults.distsql; query T noticetrace SHOW CLUSTER SETTING sql.notices.enabled ---- + +subtest deprecated_names + +user root + +# The following tests exercise the general mechanism to inform the user +# of new setting names. It is not specific to the particular setting +# named below. 
+ +query B noticetrace +SHOW CLUSTER SETTING sql.trace.log_statement_execute +---- +NOTICE: the name "sql.trace.log_statement_execute" is deprecated; use "sql.log.all_statements.enabled" instead + +query T noticetrace +RESET CLUSTER SETTING sql.trace.log_statement_execute +---- +NOTICE: the name "sql.trace.log_statement_execute" is deprecated; use "sql.log.all_statements.enabled" instead + +skipif config 3node-tenant-default-configs +query T noticetrace +ALTER TENANT ALL RESET CLUSTER SETTING sql.trace.log_statement_execute +---- +NOTICE: the name "sql.trace.log_statement_execute" is deprecated; use "sql.log.all_statements.enabled" instead diff --git a/pkg/sql/logictest/testdata/logic_test/distsql_subquery b/pkg/sql/logictest/testdata/logic_test/distsql_subquery index 5080e432ad74..89f0153b8be1 100644 --- a/pkg/sql/logictest/testdata/logic_test/distsql_subquery +++ b/pkg/sql/logictest/testdata/logic_test/distsql_subquery @@ -10,7 +10,7 @@ statement ok INSERT INTO ab VALUES (1, 1), (1, 3), (2, 2) statement ok -SET CLUSTER SETTING kv.range_merge.queue_enabled = false +SET CLUSTER SETTING kv.range_merge.queue.enabled = false statement ok ALTER TABLE ab SPLIT AT VALUES (2) diff --git a/pkg/sql/logictest/testdata/logic_test/event_log b/pkg/sql/logictest/testdata/logic_test/event_log index 47c4ae19bc90..18882f7a959c 100644 --- a/pkg/sql/logictest/testdata/logic_test/event_log +++ b/pkg/sql/logictest/testdata/logic_test/event_log @@ -478,7 +478,7 @@ AND info NOT LIKE '%sql.stats%' ORDER BY "timestamp", info ---- 1 {"ApplicationName": "$ internal-optInToDiagnosticsStatReporting", "EventType": "set_cluster_setting", "SettingName": "diagnostics.reporting.enabled", "Statement": "SET CLUSTER SETTING \"diagnostics.reporting.enabled\" = true", "Tag": "SET CLUSTER SETTING", "User": "root", "Value": "true"} -1 {"ApplicationName": "$ internal-enable-merge-queue", "EventType": "set_cluster_setting", "SettingName": "kv.range_merge.queue_enabled", "Statement": "SET CLUSTER SETTING \"kv.range_merge.queue_enabled\" = false", "Tag": "SET CLUSTER SETTING", "User": "root", "Value": "false"} +1 {"ApplicationName": "$ internal-enable-merge-queue", "EventType": "set_cluster_setting", "SettingName": "kv.range_merge.queue.enabled", "Statement": "SET CLUSTER SETTING \"kv.range_merge.queue.enabled\" = false", "Tag": "SET CLUSTER SETTING", "User": "root", "Value": "false"} 1 {"EventType": "set_cluster_setting", "SettingName": "sql.crdb_internal.table_row_statistics.as_of_time", "Statement": "SET CLUSTER SETTING \"sql.crdb_internal.table_row_statistics.as_of_time\" = e'-1\\u00B5s'", "Tag": "SET CLUSTER SETTING", "User": "root", "Value": "-00:00:00.000001"} 1 {"EventType": "set_cluster_setting", "SettingName": "kv.allocator.load_based_lease_rebalancing.enabled", "Statement": "SET CLUSTER SETTING \"kv.allocator.load_based_lease_rebalancing.enabled\" = false", "Tag": "SET CLUSTER SETTING", "User": "root", "Value": "false"} 1 {"EventType": "set_cluster_setting", "SettingName": "kv.allocator.load_based_lease_rebalancing.enabled", "Statement": "SET CLUSTER SETTING \"kv.allocator.load_based_lease_rebalancing.enabled\" = DEFAULT", "Tag": "SET CLUSTER SETTING", "User": "root", "Value": "DEFAULT"} diff --git a/pkg/sql/logictest/testdata/logic_test/event_log_legacy b/pkg/sql/logictest/testdata/logic_test/event_log_legacy index ac2b7f3e842e..1c2657aa4cfe 100644 --- a/pkg/sql/logictest/testdata/logic_test/event_log_legacy +++ b/pkg/sql/logictest/testdata/logic_test/event_log_legacy @@ -479,7 +479,7 @@ AND info NOT LIKE '%sql.stats%' 
ORDER BY "timestamp", info ---- 1 {"ApplicationName": "$ internal-optInToDiagnosticsStatReporting", "EventType": "set_cluster_setting", "SettingName": "diagnostics.reporting.enabled", "Statement": "SET CLUSTER SETTING \"diagnostics.reporting.enabled\" = true", "Tag": "SET CLUSTER SETTING", "User": "root", "Value": "true"} -1 {"EventType": "set_cluster_setting", "SettingName": "kv.range_merge.queue_enabled", "Statement": "SET CLUSTER SETTING \"kv.range_merge.queue_enabled\" = false", "Tag": "SET CLUSTER SETTING", "User": "root", "Value": "false"} +1 {"EventType": "set_cluster_setting", "SettingName": "kv.range_merge.queue.enabled", "Statement": "SET CLUSTER SETTING \"kv.range_merge.queue.enabled\" = false", "Tag": "SET CLUSTER SETTING", "User": "root", "Value": "false"} 1 {"EventType": "set_cluster_setting", "SettingName": "sql.crdb_internal.table_row_statistics.as_of_time", "Statement": "SET CLUSTER SETTING \"sql.crdb_internal.table_row_statistics.as_of_time\" = e'-1\\u00B5s'", "Tag": "SET CLUSTER SETTING", "User": "root", "Value": "-00:00:00.000001"} 1 {"EventType": "set_cluster_setting", "SettingName": "kv.allocator.load_based_lease_rebalancing.enabled", "Statement": "SET CLUSTER SETTING \"kv.allocator.load_based_lease_rebalancing.enabled\" = false", "Tag": "SET CLUSTER SETTING", "User": "root", "Value": "false"} 1 {"EventType": "set_cluster_setting", "SettingName": "kv.allocator.load_based_lease_rebalancing.enabled", "Statement": "SET CLUSTER SETTING \"kv.allocator.load_based_lease_rebalancing.enabled\" = DEFAULT", "Tag": "SET CLUSTER SETTING", "User": "root", "Value": "DEFAULT"} diff --git a/pkg/sql/logictest/testdata/logic_test/poison_after_push b/pkg/sql/logictest/testdata/logic_test/poison_after_push index 7346423e18ad..2205d8984d15 100644 --- a/pkg/sql/logictest/testdata/logic_test/poison_after_push +++ b/pkg/sql/logictest/testdata/logic_test/poison_after_push @@ -18,7 +18,7 @@ # the limitation described in #24798. Variants that do not use DistSQL # transparently refresh this error away. statement ok -SET CLUSTER SETTING kv.transaction.write_pipelining_enabled = false +SET CLUSTER SETTING kv.transaction.write_pipelining.enabled = false statement ok CREATE TABLE t (id INT PRIMARY KEY) diff --git a/pkg/sql/logictest/testdata/logic_test/statement_statistics b/pkg/sql/logictest/testdata/logic_test/statement_statistics index 5094f64232fd..4f6072d57c4f 100644 --- a/pkg/sql/logictest/testdata/logic_test/statement_statistics +++ b/pkg/sql/logictest/testdata/logic_test/statement_statistics @@ -134,16 +134,16 @@ SELECT x FROM test WHERE y = 1/z # Set a cluster setting to make it show up below. Which one is set # does not matter. 
statement ok -SET CLUSTER SETTING debug.panic_on_failed_assertions = true; +SET CLUSTER SETTING debug.panic_on_failed_assertions.enabled = true; statement ok -RESET CLUSTER SETTING debug.panic_on_failed_assertions +RESET CLUSTER SETTING debug.panic_on_failed_assertions.enabled statement ok SHOW application_name statement ok -SHOW CLUSTER SETTING debug.panic_on_failed_assertions +SHOW CLUSTER SETTING debug.panic_on_failed_assertions.enabled statement ok SET application_name = ''; @@ -167,11 +167,11 @@ SELECT x FROM test WHERE y = (_ / z) !+ SELECT x FROM test WHERE y IN (_, _, _ + x, _, _) · SELECT x FROM test WHERE y IN (_, _, __more1_10__) + SELECT x FROM test WHERE y NOT IN (_, _, __more1_10__) · -SET CLUSTER SETTING "debug.panic_on_failed_assertions" = DEFAULT · -SET CLUSTER SETTING "debug.panic_on_failed_assertions" = _ · +SET CLUSTER SETTING "debug.panic_on_failed_assertions.enabled" = DEFAULT · +SET CLUSTER SETTING "debug.panic_on_failed_assertions.enabled" = _ · SET application_name = '_' · SET distsql = "on" · -SHOW CLUSTER SETTING "debug.panic_on_failed_assertions" · +SHOW CLUSTER SETTING "debug.panic_on_failed_assertions.enabled" · SHOW application_name · # Check that the latency measurements looks reasonable, protecting diff --git a/pkg/sql/logictest/testdata/logic_test/system b/pkg/sql/logictest/testdata/logic_test/system index 638dee4d4f14..1f431e322188 100644 --- a/pkg/sql/logictest/testdata/logic_test/system +++ b/pkg/sql/logictest/testdata/logic_test/system @@ -1168,7 +1168,8 @@ GRANT ALL ON system.lease TO testuser # NB: the "order by" is necessary or this test is flaky under DistSQL. # This is somewhat surprising. # With probabilistic test tenant creation, we have to filter out -# kv.range_merge.queue_enabled, since it will only be set in cases +# kv.range_merge.queue.enabled (with key name +# kv.range_merge.queue_enabled), since it will only be set in cases # where a test tenant is not allocated. query T SELECT name @@ -1203,7 +1204,8 @@ version statement ok INSERT INTO system.settings (name, value) VALUES ('somesetting', 'somevalue') -# Have to exclude kv.range_merge.queue_enabled as it is not accessible +# Have to exclude kv.range_merge.queue.enabled (with key name +# kv.range_merge.queue_enabled) as it is not accessible # to tenants. query TT SELECT name, value diff --git a/pkg/sql/logictest/testdata/logic_test/tenant b/pkg/sql/logictest/testdata/logic_test/tenant index 537f33876948..c741d7f39bd9 100644 --- a/pkg/sql/logictest/testdata/logic_test/tenant +++ b/pkg/sql/logictest/testdata/logic_test/tenant @@ -383,7 +383,7 @@ SELECT id FROM system.tenants WHERE name = 'tmpl' statement ok ALTER TENANT tmpl GRANT CAPABILITY can_view_node_info; -- will be copied -ALTER TENANT tmpl SET CLUSTER SETTING trace.debug.enable = true; -- will be copied +ALTER TENANT tmpl SET CLUSTER SETTING trace.debug_http_endpoint.enabled = true; -- will be copied -- Simulate resource limits. Will be copied. -- Note: we cannot use the update_tenant_resource_limits() builtin -- directly here because it can only be used from a CCL binary. @@ -419,7 +419,7 @@ query TTTT rowsort SELECT variable, value, type, origin FROM [SHOW CLUSTER SETTINGS FOR TENANT othertenant] WHERE origin != 'no-override' ---- -trace.debug.enable true b per-tenant-override +trace.debug_http_endpoint.enabled true b per-tenant-override # Check that the resource usage parameters were copied. 
query IIRRRRI @@ -467,7 +467,7 @@ query TTTT rowsort SELECT variable, value, type, origin FROM [SHOW CLUSTER SETTINGS FOR TENANT othertenant] WHERE origin != 'no-override' ---- -trace.debug.enable true b per-tenant-override +trace.debug_http_endpoint.enabled true b per-tenant-override # Check that the resource usage parameters were copied. query IIRRRRI diff --git a/pkg/sql/scatter_test.go b/pkg/sql/scatter_test.go index 94edac3cab63..35d4f009e57c 100644 --- a/pkg/sql/scatter_test.go +++ b/pkg/sql/scatter_test.go @@ -53,7 +53,7 @@ func TestScatterRandomizeLeases(t *testing.T) { // Even though we disabled merges via the store testing knob, we must also // disable the setting in order for manual splits to be allowed. - r.Exec(t, "SET CLUSTER SETTING kv.range_merge.queue_enabled = false") + r.Exec(t, "SET CLUSTER SETTING kv.range_merge.queue.enabled = false") // Introduce 99 splits to get 100 ranges. r.Exec(t, "ALTER TABLE test.t SPLIT AT (SELECT i*10 FROM generate_series(1, 99) AS g(i))") diff --git a/pkg/sql/show_test.go b/pkg/sql/show_test.go index b43971ff9bae..4088e3c48254 100644 --- a/pkg/sql/show_test.go +++ b/pkg/sql/show_test.go @@ -1223,10 +1223,13 @@ func TestLintClusterSettingNames(t *testing.T) { t.Errorf("%s: variable name must be all lowercase", settingName) } - suffixSuggestions := map[string]string{ - "_ttl": ".ttl", - "_enabled": ".enabled", - "_timeout": ".timeout", + suffixSuggestions := map[string]struct { + suggestion string + exceptions []string + }{ + "_ttl": {suggestion: ".ttl"}, + "_enabled": {suggestion: ".enabled"}, + "_timeout": {suggestion: ".timeout", exceptions: []string{".read_timeout", ".write_timeout"}}, } nameErr := func() error { @@ -1253,68 +1256,34 @@ func TestLintClusterSettingNames(t *testing.T) { } } - for suffix, repl := range suffixSuggestions { - if strings.HasSuffix(settingName, suffix) { - return errors.Errorf("%s: use %q instead of %q", settingName, repl, suffix) + if !strings.HasPrefix(settingName, "sql.defaults.") { + // The sql.default settings are special cased: they correspond + // to same-name session variables, and session var names cannot + // contain periods. 
+ for suffix, repl := range suffixSuggestions { + if strings.HasSuffix(settingName, suffix) { + hasException := false + for _, e := range repl.exceptions { + if strings.HasSuffix(settingName, e) { + hasException = true + break + } + } + if !hasException { + return errors.Errorf("%s: use %q instead of %q", settingName, repl.suggestion, suffix) + } + } } - } - if sType == "b" && !strings.HasSuffix(settingName, ".enabled") { - return errors.Errorf("%s: use .enabled for booleans", settingName) + if sType == "b" && !strings.HasSuffix(settingName, ".enabled") && !strings.HasSuffix(settingName, ".disabled") { + return errors.Errorf("%s: use .enabled for booleans (or, rarely, .disabled)", settingName) + } } return nil }() if nameErr != nil { - var grandFathered = map[string]string{ - "server.declined_reservation_timeout": `server.declined_reservation_timeout: use ".timeout" instead of "_timeout"`, - "server.failed_reservation_timeout": `server.failed_reservation_timeout: use ".timeout" instead of "_timeout"`, - "server.web_session_timeout": `server.web_session_timeout: use ".timeout" instead of "_timeout"`, - "sql.distsql.flow_stream_timeout": `sql.distsql.flow_stream_timeout: use ".timeout" instead of "_timeout"`, - "debug.panic_on_failed_assertions": `debug.panic_on_failed_assertions: use .enabled for booleans`, - "diagnostics.reporting.send_crash_reports": `diagnostics.reporting.send_crash_reports: use .enabled for booleans`, - "kv.closed_timestamp.follower_reads_enabled": `kv.closed_timestamp.follower_reads_enabled: use ".enabled" instead of "_enabled"`, - "kv.raft_log.disable_synchronization_unsafe": `kv.raft_log.disable_synchronization_unsafe: use .enabled for booleans`, - "kv.range_merge.queue_enabled": `kv.range_merge.queue_enabled: use ".enabled" instead of "_enabled"`, - "kv.range_split.by_load_enabled": `kv.range_split.by_load_enabled: use ".enabled" instead of "_enabled"`, - "kv.transaction.parallel_commits_enabled": `kv.transaction.parallel_commits_enabled: use ".enabled" instead of "_enabled"`, - "kv.transaction.write_pipelining_enabled": `kv.transaction.write_pipelining_enabled: use ".enabled" instead of "_enabled"`, - "server.clock.forward_jump_check_enabled": `server.clock.forward_jump_check_enabled: use ".enabled" instead of "_enabled"`, - "sql.defaults.experimental_optimizer_mutations": `sql.defaults.experimental_optimizer_mutations: use .enabled for booleans`, - "sql.distsql.distribute_index_joins": `sql.distsql.distribute_index_joins: use .enabled for booleans`, - "sql.metrics.statement_details.dump_to_logs": `sql.metrics.statement_details.dump_to_logs: use .enabled for booleans`, - "sql.metrics.statement_details.sample_logical_plans": `sql.metrics.statement_details.sample_logical_plans: use .enabled for booleans`, - "sql.trace.log_statement_execute": `sql.trace.log_statement_execute: use .enabled for booleans`, - "trace.debug.enable": `trace.debug.enable: use .enabled for booleans`, - - // These were grandfathered because the test wasn't running on - // the CCL code. 
- "bulkio.backup.export_request_verbose_tracing": `bulkio.backup.export_request_verbose_tracing: use .enabled for booleans`, - "bulkio.backup.read_timeout": `bulkio.backup.read_timeout: use ".timeout" instead of "_timeout"`, - "bulkio.backup.split_keys_on_timestamps": `bulkio.backup.split_keys_on_timestamps: use .enabled for booleans`, - "bulkio.restore.memory_monitor_ssts": `bulkio.restore.memory_monitor_ssts: use .enabled for booleans`, - "bulkio.restore.use_simple_import_spans": `bulkio.restore.use_simple_import_spans: use .enabled for booleans`, - "changefeed.balance_range_distribution.enable": `changefeed.balance_range_distribution.enable: use .enabled for booleans`, - "changefeed.batch_reduction_retry_enabled": `changefeed.batch_reduction_retry_enabled: use ".enabled" instead of "_enabled"`, - "changefeed.idle_timeout": `changefeed.idle_timeout: use ".timeout" instead of "_timeout"`, - "changefeed.new_pubsub_sink_enabled": `changefeed.new_pubsub_sink_enabled: use ".enabled" instead of "_enabled"`, - "changefeed.new_webhook_sink_enabled": `changefeed.new_webhook_sink_enabled: use ".enabled" instead of "_enabled"`, - "changefeed.permissions.require_external_connection_sink": `changefeed.permissions.require_external_connection_sink: use .enabled for booleans`, - "server.oidc_authentication.autologin": `server.oidc_authentication.autologin: use .enabled for booleans`, - "stream_replication.job_liveness_timeout": `stream_replication.job_liveness_timeout: use ".timeout" instead of "_timeout"`, - - // These use the _timeout suffix to stay consistent with the - // corresponding session variables. - "sql.defaults.statement_timeout": `sql.defaults.statement_timeout: use ".timeout" instead of "_timeout"`, - "sql.defaults.lock_timeout": `sql.defaults.lock_timeout: use ".timeout" instead of "_timeout"`, - "sql.defaults.idle_in_session_timeout": `sql.defaults.idle_in_session_timeout: use ".timeout" instead of "_timeout"`, - "sql.defaults.idle_in_transaction_session_timeout": `sql.defaults.idle_in_transaction_session_timeout: use ".timeout" instead of "_timeout"`, - "cloudstorage.gs.chunking.retry_timeout": `cloudstorage.gs.chunking.retry_timeout: use ".timeout" instead of "_timeout"`, - } - expectedErr, found := grandFathered[settingName] - if !found || expectedErr != nameErr.Error() { - t.Error(nameErr) - } + t.Error(nameErr) } if strings.TrimSpace(desc) != desc { diff --git a/pkg/sql/sqlstats/cluster_settings.go b/pkg/sql/sqlstats/cluster_settings.go index c5de5c427485..9d4998185a81 100644 --- a/pkg/sql/sqlstats/cluster_settings.go +++ b/pkg/sql/sqlstats/cluster_settings.go @@ -57,6 +57,7 @@ var DumpStmtStatsToLogBeforeReset = settings.RegisterBoolSetting( "sql.metrics.statement_details.dump_to_logs", "dump collected statement statistics to node logs when periodically cleared", false, + settings.WithName("sql.metrics.statement_details.dump_to_logs.enabled"), settings.WithPublic) // SampleLogicalPlans specifies whether we periodically sample the logical plan diff --git a/pkg/sql/sqltestutils/telemetry.go b/pkg/sql/sqltestutils/telemetry.go index 01814d4d5ad6..60fa12096c70 100644 --- a/pkg/sql/sqltestutils/telemetry.go +++ b/pkg/sql/sqltestutils/telemetry.go @@ -304,7 +304,7 @@ func (tt *telemetryTest) prepareCluster(db *gosql.DB) { runner := sqlutils.MakeSQLRunner(db) // Disable automatic reporting so it doesn't interfere with the test. 
runner.Exec(tt.t, "SET CLUSTER SETTING diagnostics.reporting.enabled = false") - runner.Exec(tt.t, "SET CLUSTER SETTING diagnostics.reporting.send_crash_reports = false") + runner.Exec(tt.t, "SET CLUSTER SETTING diagnostics.reporting.send_crash_reports.enabled = false") // Disable plan caching to get accurate counts if the same statement is // issued multiple times. runner.Exec(tt.t, "SET CLUSTER SETTING sql.query_cache.enabled = false") diff --git a/pkg/sql/trace_test.go b/pkg/sql/trace_test.go index f8ea47ebe277..c43e685122c0 100644 --- a/pkg/sql/trace_test.go +++ b/pkg/sql/trace_test.go @@ -346,7 +346,7 @@ func TestTrace(t *testing.T) { } if _, err := cluster.ServerConn(0).Exec( - fmt.Sprintf(`SET CLUSTER SETTING trace.debug.enable = %t`, enableTr), + fmt.Sprintf(`SET CLUSTER SETTING trace.debug_http_endpoint.enabled = %t`, enableTr), ); err != nil { t.Fatal(err) } diff --git a/pkg/testutils/testcluster/testcluster.go b/pkg/testutils/testcluster/testcluster.go index e8abbca84544..bc976265840f 100644 --- a/pkg/testutils/testcluster/testcluster.go +++ b/pkg/testutils/testcluster/testcluster.go @@ -441,7 +441,7 @@ func (tc *TestCluster) Start(t serverutils.TestFataler) { if _, err := tc.Servers[0].SystemLayer(). InternalExecutor().(isql.Executor). Exec(ctx, "enable-merge-queue", nil, /* txn */ - `SET CLUSTER SETTING kv.range_merge.queue_enabled = false`); err != nil { + `SET CLUSTER SETTING kv.range_merge.queue.enabled = false`); err != nil { tc.Stopper().Stop(ctx) t.Fatal(err) } @@ -451,7 +451,7 @@ func (tc *TestCluster) Start(t serverutils.TestFataler) { if _, err := tc.Servers[0].SystemLayer(). InternalExecutor().(isql.Executor). Exec(ctx, "enable-split-by-load", nil, /*txn */ - `SET CLUSTER SETTING kv.range_split.by_load_enabled = false`); err != nil { + `SET CLUSTER SETTING kv.range_split.by_load.enabled = false`); err != nil { tc.Stopper().Stop(ctx) t.Fatal(err) } diff --git a/pkg/util/log/channel/channel_generated.go b/pkg/util/log/channel/channel_generated.go index 411aefd80b8c..48224ee7ee15 100644 --- a/pkg/util/log/channel/channel_generated.go +++ b/pkg/util/log/channel/channel_generated.go @@ -109,7 +109,7 @@ const SENSITIVE_ACCESS = logpb.Channel_SENSITIVE_ACCESS // behalf of client connections: // // - Logical SQL statement executions (when enabled via the -// `sql.trace.log_statement_execute` [cluster setting](cluster-settings.html)) +// `sql.log.all_statements.enabled` [cluster setting](cluster-settings.html)) // - uncaught Go panic errors during the execution of a SQL statement. const SQL_EXEC = logpb.Channel_SQL_EXEC diff --git a/pkg/util/log/eventpb/sql_audit_events.proto b/pkg/util/log/eventpb/sql_audit_events.proto index a64e3b02fef7..30cb9041318c 100644 --- a/pkg/util/log/eventpb/sql_audit_events.proto +++ b/pkg/util/log/eventpb/sql_audit_events.proto @@ -231,7 +231,7 @@ message TxnRowsReadLimitInternal { // are only emitted via external logging. // QueryExecute is recorded when a query is executed, -// and the cluster setting `sql.trace.log_statement_execute` is set. +// and the cluster setting `sql.log.all_statements.enabled` is set. 
message QueryExecute { CommonEventDetails common = 1 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "", (gogoproto.embed) = true]; CommonSQLEventDetails sql = 2 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "", (gogoproto.embed) = true]; diff --git a/pkg/util/log/log_channels_generated.go b/pkg/util/log/log_channels_generated.go index db8c83435f7f..9c18b5725aa9 100644 --- a/pkg/util/log/log_channels_generated.go +++ b/pkg/util/log/log_channels_generated.go @@ -4664,7 +4664,7 @@ type loggerSqlExec struct{} // behalf of client connections: // // - Logical SQL statement executions (when enabled via the -// `sql.trace.log_statement_execute` [cluster setting](cluster-settings.html)) +// `sql.log.all_statements.enabled` [cluster setting](cluster-settings.html)) // - uncaught Go panic errors during the execution of a SQL statement. var SqlExec loggerSqlExec @@ -4683,7 +4683,7 @@ var _ ChannelLogger = SqlExec // behalf of client connections: // // - Logical SQL statement executions (when enabled via the -// `sql.trace.log_statement_execute` [cluster setting](cluster-settings.html)) +// `sql.log.all_statements.enabled` [cluster setting](cluster-settings.html)) // - uncaught Go panic errors during the execution of a SQL statement. // // The `INFO` severity is used for informational messages that do not @@ -4702,7 +4702,7 @@ func (loggerSqlExec) Infof(ctx context.Context, format string, args ...interface // behalf of client connections: // // - Logical SQL statement executions (when enabled via the -// `sql.trace.log_statement_execute` [cluster setting](cluster-settings.html)) +// `sql.log.all_statements.enabled` [cluster setting](cluster-settings.html)) // - uncaught Go panic errors during the execution of a SQL statement. // // The `INFO` severity is used for informational messages that do not @@ -4721,7 +4721,7 @@ func (loggerSqlExec) VInfof(ctx context.Context, level Level, format string, arg // behalf of client connections: // // - Logical SQL statement executions (when enabled via the -// `sql.trace.log_statement_execute` [cluster setting](cluster-settings.html)) +// `sql.log.all_statements.enabled` [cluster setting](cluster-settings.html)) // - uncaught Go panic errors during the execution of a SQL statement. // // The `INFO` severity is used for informational messages that do not @@ -4739,7 +4739,7 @@ func (loggerSqlExec) Info(ctx context.Context, msg string) { // behalf of client connections: // // - Logical SQL statement executions (when enabled via the -// `sql.trace.log_statement_execute` [cluster setting](cluster-settings.html)) +// `sql.log.all_statements.enabled` [cluster setting](cluster-settings.html)) // - uncaught Go panic errors during the execution of a SQL statement. // // The `INFO` severity is used for informational messages that do not @@ -4756,7 +4756,7 @@ func (loggerSqlExec) InfofDepth(ctx context.Context, depth int, format string, a // behalf of client connections: // // - Logical SQL statement executions (when enabled via the -// `sql.trace.log_statement_execute` [cluster setting](cluster-settings.html)) +// `sql.log.all_statements.enabled` [cluster setting](cluster-settings.html)) // - uncaught Go panic errors during the execution of a SQL statement. 
// // The `WARNING` severity is used for situations which may require special handling, @@ -4775,7 +4775,7 @@ func (loggerSqlExec) Warningf(ctx context.Context, format string, args ...interf // behalf of client connections: // // - Logical SQL statement executions (when enabled via the -// `sql.trace.log_statement_execute` [cluster setting](cluster-settings.html)) +// `sql.log.all_statements.enabled` [cluster setting](cluster-settings.html)) // - uncaught Go panic errors during the execution of a SQL statement. // // The `WARNING` severity is used for situations which may require special handling, @@ -4794,7 +4794,7 @@ func (loggerSqlExec) VWarningf(ctx context.Context, level Level, format string, // behalf of client connections: // // - Logical SQL statement executions (when enabled via the -// `sql.trace.log_statement_execute` [cluster setting](cluster-settings.html)) +// `sql.log.all_statements.enabled` [cluster setting](cluster-settings.html)) // - uncaught Go panic errors during the execution of a SQL statement. // // The `WARNING` severity is used for situations which may require special handling, @@ -4812,7 +4812,7 @@ func (loggerSqlExec) Warning(ctx context.Context, msg string) { // behalf of client connections: // // - Logical SQL statement executions (when enabled via the -// `sql.trace.log_statement_execute` [cluster setting](cluster-settings.html)) +// `sql.log.all_statements.enabled` [cluster setting](cluster-settings.html)) // - uncaught Go panic errors during the execution of a SQL statement. // // The `WARNING` severity is used for situations which may require special handling, @@ -4829,7 +4829,7 @@ func (loggerSqlExec) WarningfDepth(ctx context.Context, depth int, format string // behalf of client connections: // // - Logical SQL statement executions (when enabled via the -// `sql.trace.log_statement_execute` [cluster setting](cluster-settings.html)) +// `sql.log.all_statements.enabled` [cluster setting](cluster-settings.html)) // - uncaught Go panic errors during the execution of a SQL statement. // // The `ERROR` severity is used for situations that require special handling, @@ -4849,7 +4849,7 @@ func (loggerSqlExec) Errorf(ctx context.Context, format string, args ...interfac // behalf of client connections: // // - Logical SQL statement executions (when enabled via the -// `sql.trace.log_statement_execute` [cluster setting](cluster-settings.html)) +// `sql.log.all_statements.enabled` [cluster setting](cluster-settings.html)) // - uncaught Go panic errors during the execution of a SQL statement. // // The `ERROR` severity is used for situations that require special handling, @@ -4869,7 +4869,7 @@ func (loggerSqlExec) VErrorf(ctx context.Context, level Level, format string, ar // behalf of client connections: // // - Logical SQL statement executions (when enabled via the -// `sql.trace.log_statement_execute` [cluster setting](cluster-settings.html)) +// `sql.log.all_statements.enabled` [cluster setting](cluster-settings.html)) // - uncaught Go panic errors during the execution of a SQL statement. 
// // The `ERROR` severity is used for situations that require special handling, @@ -4888,7 +4888,7 @@ func (loggerSqlExec) Error(ctx context.Context, msg string) { // behalf of client connections: // // - Logical SQL statement executions (when enabled via the -// `sql.trace.log_statement_execute` [cluster setting](cluster-settings.html)) +// `sql.log.all_statements.enabled` [cluster setting](cluster-settings.html)) // - uncaught Go panic errors during the execution of a SQL statement. // // The `ERROR` severity is used for situations that require special handling, @@ -4906,7 +4906,7 @@ func (loggerSqlExec) ErrorfDepth(ctx context.Context, depth int, format string, // behalf of client connections: // // - Logical SQL statement executions (when enabled via the -// `sql.trace.log_statement_execute` [cluster setting](cluster-settings.html)) +// `sql.log.all_statements.enabled` [cluster setting](cluster-settings.html)) // - uncaught Go panic errors during the execution of a SQL statement. // // The `FATAL` severity is used for situations that require an immediate, hard @@ -4926,7 +4926,7 @@ func (loggerSqlExec) Fatalf(ctx context.Context, format string, args ...interfac // behalf of client connections: // // - Logical SQL statement executions (when enabled via the -// `sql.trace.log_statement_execute` [cluster setting](cluster-settings.html)) +// `sql.log.all_statements.enabled` [cluster setting](cluster-settings.html)) // - uncaught Go panic errors during the execution of a SQL statement. // // The `FATAL` severity is used for situations that require an immediate, hard @@ -4946,7 +4946,7 @@ func (loggerSqlExec) VFatalf(ctx context.Context, level Level, format string, ar // behalf of client connections: // // - Logical SQL statement executions (when enabled via the -// `sql.trace.log_statement_execute` [cluster setting](cluster-settings.html)) +// `sql.log.all_statements.enabled` [cluster setting](cluster-settings.html)) // - uncaught Go panic errors during the execution of a SQL statement. // // The `FATAL` severity is used for situations that require an immediate, hard @@ -4965,7 +4965,7 @@ func (loggerSqlExec) Fatal(ctx context.Context, msg string) { // behalf of client connections: // // - Logical SQL statement executions (when enabled via the -// `sql.trace.log_statement_execute` [cluster setting](cluster-settings.html)) +// `sql.log.all_statements.enabled` [cluster setting](cluster-settings.html)) // - uncaught Go panic errors during the execution of a SQL statement. // // The `FATAL` severity is used for situations that require an immediate, hard @@ -4982,7 +4982,7 @@ func (loggerSqlExec) FatalfDepth(ctx context.Context, depth int, format string, // behalf of client connections: // // - Logical SQL statement executions (when enabled via the -// `sql.trace.log_statement_execute` [cluster setting](cluster-settings.html)) +// `sql.log.all_statements.enabled` [cluster setting](cluster-settings.html)) // - uncaught Go panic errors during the execution of a SQL statement. 
func (loggerSqlExec) Shout(ctx context.Context, sev Severity, msg string) { shoutfDepth(ctx, 1, sev, channel.SQL_EXEC, msg) @@ -4996,7 +4996,7 @@ func (loggerSqlExec) Shout(ctx context.Context, sev Severity, msg string) { // behalf of client connections: // // - Logical SQL statement executions (when enabled via the -// `sql.trace.log_statement_execute` [cluster setting](cluster-settings.html)) +// `sql.log.all_statements.enabled` [cluster setting](cluster-settings.html)) // - uncaught Go panic errors during the execution of a SQL statement. func (loggerSqlExec) Shoutf(ctx context.Context, sev Severity, format string, args ...interface{}) { shoutfDepth(ctx, 1, sev, channel.SQL_EXEC, format, args...) @@ -5009,7 +5009,7 @@ func (loggerSqlExec) Shoutf(ctx context.Context, sev Severity, format string, ar // behalf of client connections: // // - Logical SQL statement executions (when enabled via the -// `sql.trace.log_statement_execute` [cluster setting](cluster-settings.html)) +// `sql.log.all_statements.enabled` [cluster setting](cluster-settings.html)) // - uncaught Go panic errors during the execution of a SQL statement. func (loggerSqlExec) VEvent(ctx context.Context, level Level, msg string) { vEventf(ctx, false /* isErr */, 1, level, channel.SQL_EXEC, msg) @@ -5022,7 +5022,7 @@ func (loggerSqlExec) VEvent(ctx context.Context, level Level, msg string) { // behalf of client connections: // // - Logical SQL statement executions (when enabled via the -// `sql.trace.log_statement_execute` [cluster setting](cluster-settings.html)) +// `sql.log.all_statements.enabled` [cluster setting](cluster-settings.html)) // - uncaught Go panic errors during the execution of a SQL statement. func (loggerSqlExec) VEventf(ctx context.Context, level Level, format string, args ...interface{}) { vEventf(ctx, false /* isErr */, 1, level, channel.SQL_EXEC, format, args...) @@ -5034,7 +5034,7 @@ func (loggerSqlExec) VEventf(ctx context.Context, level Level, format string, ar // behalf of client connections: // // - Logical SQL statement executions (when enabled via the -// `sql.trace.log_statement_execute` [cluster setting](cluster-settings.html)) +// `sql.log.all_statements.enabled` [cluster setting](cluster-settings.html)) // - uncaught Go panic errors during the execution of a SQL statement. func (loggerSqlExec) VEventfDepth(ctx context.Context, depth int, level Level, format string, args ...interface{}) { vEventf(ctx, false /* isErr */, 1+depth, level, channel.SQL_EXEC, format, args...) diff --git a/pkg/util/log/logcrash/crash_reporting.go b/pkg/util/log/logcrash/crash_reporting.go index ce2db47916ef..18205dad11ed 100644 --- a/pkg/util/log/logcrash/crash_reporting.go +++ b/pkg/util/log/logcrash/crash_reporting.go @@ -59,20 +59,22 @@ var ( false, settings.WithPublic) - // CrashReports wraps "diagnostics.reporting.send_crash_reports". + // CrashReports wraps "diagnostics.reporting.send_crash_reports.enabled". 
CrashReports = settings.RegisterBoolSetting( settings.TenantWritable, "diagnostics.reporting.send_crash_reports", "send crash and panic reports", true, + settings.WithName("diagnostics.reporting.send_crash_reports.enabled"), ) - // PanicOnAssertions wraps "debug.panic_on_failed_assertions" + // PanicOnAssertions wraps "debug.panic_on_failed_assertions.enabled" PanicOnAssertions = settings.RegisterBoolSetting( settings.TenantWritable, "debug.panic_on_failed_assertions", "panic when an assertion fails rather than reporting", false, + settings.WithName("debug.panic_on_failed_assertions.enabled"), ) // startTime records when the process started so that crash reports can diff --git a/pkg/util/log/logpb/log.proto b/pkg/util/log/logpb/log.proto index b87316f295ab..0aa5268c21b3 100644 --- a/pkg/util/log/logpb/log.proto +++ b/pkg/util/log/logpb/log.proto @@ -159,7 +159,7 @@ enum Channel { // behalf of client connections: // // - Logical SQL statement executions (when enabled via the - // `sql.trace.log_statement_execute` [cluster setting](cluster-settings.html)) + // `sql.log.all_statements.enabled` [cluster setting](cluster-settings.html)) // - uncaught Go panic errors during the execution of a SQL statement. SQL_EXEC = 9; diff --git a/pkg/util/tracing/tracer.go b/pkg/util/tracing/tracer.go index 53ff32b082e3..7c4061816094 100644 --- a/pkg/util/tracing/tracer.go +++ b/pkg/util/tracing/tracer.go @@ -117,6 +117,7 @@ var enableNetTrace = settings.RegisterBoolSetting( "trace.debug.enable", "if set, traces for recent requests can be seen at https://<ui>/debug/requests", false, + settings.WithName("trace.debug_http_endpoint.enabled"), settings.WithPublic) var openTelemetryCollector = settings.RegisterStringSetting( diff --git a/pkg/workload/indexes/indexes.go b/pkg/workload/indexes/indexes.go index 8df0849e5cc9..a3a2019c2d7c 100644 --- a/pkg/workload/indexes/indexes.go +++ b/pkg/workload/indexes/indexes.go @@ -117,11 +117,11 @@ func (w *indexes) Hooks() workload.Hooks { func maybeDisableMergeQueue(sqlDB *gosql.DB) error { var ok bool if err := sqlDB.QueryRow( - `SELECT count(*) > 0 FROM [ SHOW ALL CLUSTER SETTINGS ] AS _ (v) WHERE v = 'kv.range_merge.queue_enabled'`, + `SELECT count(*) > 0 FROM [ SHOW ALL CLUSTER SETTINGS ] AS _ (v) WHERE v = 'kv.range_merge.queue.enabled'`, ).Scan(&ok); err != nil || !ok { return err } - _, err := sqlDB.Exec("SET CLUSTER SETTING kv.range_merge.queue_enabled = false") + _, err := sqlDB.Exec("SET CLUSTER SETTING kv.range_merge.queue.enabled = false") return err } diff --git a/pkg/workload/workloadsql/workloadsql.go b/pkg/workload/workloadsql/workloadsql.go index 3abd08dda035..3f37ec359b7b 100644 --- a/pkg/workload/workloadsql/workloadsql.go +++ b/pkg/workload/workloadsql/workloadsql.go @@ -75,7 +75,7 @@ func Setup( func maybeDisableMergeQueue(db *gosql.DB) error { var ok bool if err := db.QueryRow( - `SELECT count(*) > 0 FROM [ SHOW ALL CLUSTER SETTINGS ] AS _ (v) WHERE v = 'kv.range_merge.queue_enabled'`, + `SELECT count(*) > 0 FROM [ SHOW ALL CLUSTER SETTINGS ] AS _ (v) WHERE v = 'kv.range_merge.queue.enabled'`, ).Scan(&ok); err != nil || !ok { return err } @@ -92,7 +92,7 @@ func maybeDisableMergeQueue(db *gosql.DB) error { if err == nil && (v.Major() > 19 || (v.Major() == 19 && v.Minor() >= 2)) { return nil } - _, err = db.Exec("SET CLUSTER SETTING kv.range_merge.queue_enabled = false") + _, err = db.Exec("SET CLUSTER SETTING kv.range_merge.queue.enabled = false") return err }
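
For reference, the mechanism behind every rename in this patch is `settings.WithName`, as seen in the `crash_reporting.go` and `tracer.go` hunks above: the setting stays registered under its legacy key, and `WithName` attaches the new canonical, linter-compliant name. A minimal sketch of the pattern, using a hypothetical setting (`kv.example.feature_enabled` does not exist in the tree):

```go
// Hypothetical illustration only; not part of this patch.
package example

import "github.com/cockroachdb/cockroach/pkg/settings"

// exampleFeatureEnabled remains registered under its legacy key, so
// the old name keeps working as an alias, while WithName supplies the
// new canonical name satisfying the ".enabled" suffix rule for booleans.
var exampleFeatureEnabled = settings.RegisterBoolSetting(
	settings.TenantWritable,
	"kv.example.feature_enabled", // legacy key, retained as an alias
	"whether the hypothetical example feature is enabled",
	true,
	settings.WithName("kv.example.feature.enabled"), // new canonical name
)
```

After such a rename, both `SET CLUSTER SETTING kv.example.feature.enabled = false` and the legacy spelling are accepted, which is why the test and workload updates above can move to the new names without a compatibility break.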