diff --git a/Makefile b/Makefile
index 36e5176891ca..a080cdd52a4d 100644
--- a/Makefile
+++ b/Makefile
@@ -714,14 +714,10 @@ all: build
.PHONY: c-deps
c-deps: $(C_LIBS_CCL)
-build-mode = build -o $(build-output)
+build-mode = build -o $@
go-install: build-mode = install
-$(COCKROACH): build-output = $(COCKROACH)
-$(COCKROACHOSS): build-output = $(COCKROACHOSS)
-$(COCKROACHSHORT): build-output = $(COCKROACHSHORT)
-
$(COCKROACH) go-install generate: pkg/ui/distccl/bindata.go
$(COCKROACHOSS): BUILDTARGET = ./pkg/cmd/cockroach-oss
@@ -779,7 +775,8 @@ buildshort: ## Build the CockroachDB binary without the admin UI.
build: $(COCKROACH)
buildoss: $(COCKROACHOSS)
buildshort: $(COCKROACHSHORT)
-build buildoss buildshort: $(DOCGEN_TARGETS) $(if $(is-cross-compile),,$(SETTINGS_DOC_PAGE))
+build buildoss buildshort: $(DOCGEN_TARGETS)
+build buildshort: $(if $(is-cross-compile),,$(SETTINGS_DOC_PAGE))
# For historical reasons, symlink cockroach to cockroachshort.
# TODO(benesch): see if it would break anyone's workflow to remove this.
@@ -1285,9 +1282,10 @@ bin/.docgen_functions: bin/docgen
docgen functions docs/generated/sql --quiet
touch $@
-$(SETTINGS_DOC_PAGE): $(build-output)
- @$(build-output) gen settings-list --format=html > $@.tmp
- @mv -f $@.tmp $@
+settings-doc-gen := $(if $(filter buildshort,$(MAKECMDGOALS)),$(COCKROACHSHORT),$(COCKROACH))
+
+$(SETTINGS_DOC_PAGE): $(settings-doc-gen)
+ @$(settings-doc-gen) gen settings-list --format=html > $@
optgen-defs := pkg/sql/opt/ops/*.opt
optgen-norm-rules := pkg/sql/opt/norm/rules/*.opt
diff --git a/build/variables.mk b/build/variables.mk
index 8f87704111ae..4f1e9b34faa0 100644
--- a/build/variables.mk
+++ b/build/variables.mk
@@ -152,7 +152,6 @@ define VALID_VARS
bindir
bins
build-mode
- build-output
cmake-flags
configure-flags
cyan
@@ -176,6 +175,7 @@ define VALID_VARS
optgen-package
optgen-xform-rules
prefix
+ settings-doc-gen
sig
space
sse
diff --git a/docs/generated/settings/settings.html b/docs/generated/settings/settings.html
index 90db15bceb04..58b2f6ece93b 100644
--- a/docs/generated/settings/settings.html
+++ b/docs/generated/settings/settings.html
@@ -18,16 +18,20 @@
kv.allocator.lease_rebalancing_aggressiveness | float | 1 | set greater than 1.0 to rebalance leases toward load more aggressively, or between 0 and 1.0 to be more conservative about rebalancing leases |
kv.allocator.load_based_lease_rebalancing.enabled | boolean | true | set to enable rebalancing of range leases based on load and latency |
kv.allocator.range_rebalance_threshold | float | 0.05 | minimum fraction away from the mean a store's range count can be before it is considered overfull or underfull |
-kv.allocator.stat_based_rebalancing.enabled | boolean | true | set to enable rebalancing range replicas and leases to more evenly distribute read and write load across the stores in a cluster |
+kv.allocator.stat_based_rebalancing.enabled | boolean | false | set to enable rebalancing range replicas and leases to more evenly distribute read and write load across the stores in a cluster |
kv.allocator.stat_rebalance_threshold | float | 0.2 | minimum fraction away from the mean a store's stats (like disk usage or writes per second) can be before it is considered overfull or underfull |
kv.bulk_io_write.concurrent_export_requests | integer | 5 | number of export requests a store will handle concurrently before queuing |
kv.bulk_io_write.concurrent_import_requests | integer | 1 | number of import requests a store will handle concurrently before queuing |
kv.bulk_io_write.max_rate | byte size | 8.0 EiB | the rate limit (bytes/sec) to use for writes to disk on behalf of bulk io ops |
kv.bulk_sst.sync_size | byte size | 2.0 MiB | threshold after which non-Rocks SST writes must fsync (0 disables) |
+kv.closed_timestamp.close_fraction | float | 0.2 | fraction of closed timestamp target duration specifying how frequently the closed timestamp is advanced |
+kv.closed_timestamp.follower_reads_enabled | boolean | false | allow (all) replicas to serve consistent historical reads based on closed timestamp information |
+kv.closed_timestamp.target_duration | duration | 5s | if nonzero, attempt to provide closed timestamp notifications for timestamps trailing cluster time by approximately this duration |
kv.raft.command.max_size | byte size | 64 MiB | maximum size of a raft command |
kv.raft_log.synchronize | boolean | true | set to true to synchronize on Raft log writes to persistent storage ('false' risks data loss) |
kv.range.backpressure_range_size_multiplier | float | 2 | multiple of range_max_bytes that a range is allowed to grow to without splitting before writes to that range are blocked, or 0 to disable |
kv.range_descriptor_cache.size | integer | 1000000 | maximum number of entries in the range descriptor and leaseholder caches |
+kv.rangefeed.enabled | boolean | false | if set, rangefeed registration is enabled |
kv.snapshot_rebalance.max_rate | byte size | 2.0 MiB | the rate limit (bytes/sec) to use for rebalance snapshots |
kv.snapshot_recovery.max_rate | byte size | 8.0 MiB | the rate limit (bytes/sec) to use for recovery snapshots |
kv.transaction.max_intents_bytes | integer | 256000 | maximum number of bytes used to track write intents in transactions |
@@ -35,10 +39,10 @@
kv.transaction.write_pipelining_enabled | boolean | true | if enabled, transactional writes are pipelined through Raft consensus |
kv.transaction.write_pipelining_max_batch_size | integer | 0 | if non-zero, defines that maximum size batch that will be pipelined through Raft consensus |
rocksdb.min_wal_sync_interval | duration | 0s | minimum duration between syncs of the RocksDB WAL |
+schemachanger.lease.duration | duration | 5m0s | the duration of a schema change lease |
+schemachanger.lease.renew_fraction | float | 0.4 | the fraction of schemachanger.lease_duration remaining to trigger a renew of the lease |
server.clock.forward_jump_check_enabled | boolean | false | if enabled, forward clock jumps > max_offset/2 will cause a panic. |
server.clock.persist_upper_bound_interval | duration | 0s | the interval between persisting the wall time upper bound of the clock. The clock does not generate a wall time greater than the persisted timestamp and will panic if it sees a wall time greater than this value. When cockroach starts, it waits for the wall time to catch-up till this persisted timestamp. This guarantees monotonic wall time across server restarts. Not setting this or setting a value of 0 disables this feature. |
-server.closed_timestamp.close_fraction | float | 0.2 | desc |
-server.closed_timestamp.target_duration | duration | 5s | if nonzero, attempt to provide closed timestamp notifications for timestamps trailing cluster time by approximately this duration |
server.consistency_check.interval | duration | 24h0m0s | the time between range consistency checks; set to 0 to disable consistency checking |
server.declined_reservation_timeout | duration | 1s | the amount of time to consider the store throttled for up-replication after a reservation was declined |
server.failed_reservation_timeout | duration | 5s | the amount of time to consider the store throttled for up-replication after a failed reservation call |
@@ -51,6 +55,7 @@
server.web_session_timeout | duration | 168h0m0s | the duration that a newly created web session will be valid |
sql.defaults.distsql | enumeration | 1 | default distributed SQL execution mode [off = 0, auto = 1, on = 2] |
sql.defaults.optimizer | enumeration | 1 | default cost-based optimizer mode [off = 0, on = 1, local = 2] |
+sql.defaults.serial_normalization | enumeration | 0 | default handling of SERIAL in table definitions [rowid = 0, virtual_sequence = 1, sql_sequence = 2] |
sql.distsql.distribute_index_joins | boolean | true | if set, for index joins we instantiate a join reader on every node that has a stream; if not set, we use a single join reader |
sql.distsql.flow_stream_timeout | duration | 10s | amount of time incoming streams wait for a flow to be set up before erroring out |
sql.distsql.interleaved_joins.enabled | boolean | true | if set we plan interleaved table joins instead of merge joins when possible |
@@ -62,14 +67,17 @@
sql.metrics.statement_details.dump_to_logs | boolean | false | dump collected statement statistics to node logs when periodically cleared |
sql.metrics.statement_details.enabled | boolean | true | collect per-statement query statistics |
sql.metrics.statement_details.threshold | duration | 0s | minimum execution time to cause statistics to be collected |
+sql.tablecache.lease.refresh_limit | integer | 50 | maximum number of tables to periodically refresh leases for |
sql.trace.log_statement_execute | boolean | false | set to true to enable logging of executed statements |
sql.trace.session_eventlog.enabled | boolean | false | set to true to enable session tracing |
sql.trace.txn.enable_threshold | duration | 0s | duration beyond which all transactions are traced (set to 0 to disable) |
-timeseries.resolution_10s.storage_duration | duration | 720h0m0s | the amount of time to store timeseries data |
+timeseries.resolution_10s.storage_duration | duration | 720h0m0s | deprecated setting: the amount of time to store timeseries data. Replaced by timeseries.storage.10s_resolution_ttl. |
+timeseries.storage.10s_resolution_ttl | duration | 240h0m0s | the maximum age of time series data stored at the 10 second resolution. Data older than this is subject to rollup and deletion. |
+timeseries.storage.30m_resolution_ttl | duration | 2160h0m0s | the maximum age of time series data stored at the 30 minute resolution. Data older than this is subject to deletion. |
timeseries.storage.enabled | boolean | true | if set, periodic timeseries data is stored within the cluster; disabling is not recommended unless you are storing the data elsewhere |
trace.debug.enable | boolean | false | if set, traces for recent requests can be seen in the /debug page |
trace.lightstep.token | string | | if set, traces go to Lightstep using this token |
trace.zipkin.collector | string | | if set, traces go to the given Zipkin instance (example: '127.0.0.1:9411'); ignored if trace.lightstep.token is set. |
-version | custom validation | 2.0-11 | set the active cluster version in the format '&lt;major&gt;.&lt;minor&gt;'. |
+version | custom validation | 2.0-12 | set the active cluster version in the format '&lt;major&gt;.&lt;minor&gt;'. |