diff --git a/docs/generated/settings/settings-for-tenants.txt b/docs/generated/settings/settings-for-tenants.txt
index 49b5cc432944..eb954afb7d2b 100644
--- a/docs/generated/settings/settings-for-tenants.txt
+++ b/docs/generated/settings/settings-for-tenants.txt
@@ -10,14 +10,19 @@ bulkio.backup.file_size byte size 128 MiB target size for individual data files
bulkio.backup.read_timeout duration 5m0s amount of time after which a read attempt is considered timed out, which causes the backup to fail tenant-rw
bulkio.backup.read_with_priority_after duration 1m0s amount of time since the read-as-of time above which a BACKUP should use priority when retrying reads tenant-rw
bulkio.stream_ingestion.minimum_flush_interval duration 5s the minimum timestamp between flushes; flushes may still occur if internal buffers fill up tenant-rw
+changefeed.backfill.concurrent_scan_requests integer 0 number of concurrent scan requests per node issued during a backfill tenant-rw
changefeed.backfill.scan_request_size integer 524288 the maximum number of bytes returned by each scan request tenant-rw
changefeed.balance_range_distribution.enable boolean false if enabled, the ranges are balanced equally among all nodes tenant-rw
changefeed.batch_reduction_retry_enabled boolean false if true, kafka changefeeds upon erroring on an oversized batch will attempt to resend the messages with progressively lower batch sizes tenant-rw
changefeed.event_consumer_worker_queue_size integer 16 if changefeed.event_consumer_workers is enabled, this setting sets the maximum number of events which a worker can buffer tenant-rw
changefeed.event_consumer_workers integer 0 the number of workers to use when processing events: <0 disables, 0 assigns a reasonable default, >0 assigns the setting value. for experimental/core changefeeds and changefeeds using parquet format, this is disabled tenant-rw
changefeed.fast_gzip.enabled boolean true use fast gzip implementation tenant-rw
+changefeed.frontier_highwater_lag_checkpoint_threshold duration 10m0s controls the maximum the high-water mark is allowed to lag behind the leading spans of the frontier before per-span checkpointing is enabled; if 0, checkpointing due to high-water lag is disabled tenant-rw
+changefeed.memory.per_changefeed_limit byte size 512 MiB controls amount of data that can be buffered per changefeed tenant-rw
+changefeed.min_highwater_advance duration 0s minimum amount of time the changefeed high water mark must advance for it to be eligible for checkpointing; Default of 0 will checkpoint every time frontier advances, as long as the rate of checkpointing keeps up with the rate of frontier changes tenant-rw
changefeed.node_throttle_config string specifies node level throttling configuration for all changefeeds tenant-rw
changefeed.protect_timestamp.max_age duration 96h0m0s fail the changefeed if the protected timestamp age exceeds this threshold; 0 disables expiration tenant-rw
+changefeed.protect_timestamp_interval duration 10m0s controls how often the changefeed forwards its protected timestamp to the resolved timestamp tenant-rw
changefeed.schema_feed.read_with_priority_after duration 1m0s retry with high priority if we were not able to read descriptors for too long; 0 disables tenant-rw
changefeed.sink_io_workers integer 0 the number of workers used by changefeeds when sending requests to the sink (currently webhook only): <0 disables, 0 assigns a reasonable default, >0 assigns the setting value tenant-rw
cloudstorage.azure.concurrent_upload_buffers integer 1 controls the number of concurrent buffers that will be used by the Azure client when uploading chunks. Each buffer can buffer up to cloudstorage.write_chunk.size of memory during an upload tenant-rw
diff --git a/docs/generated/settings/settings.html b/docs/generated/settings/settings.html
index f83716d38945..ecc6ef2f0611 100644
--- a/docs/generated/settings/settings.html
+++ b/docs/generated/settings/settings.html
@@ -16,14 +16,19 @@
bulkio.backup.read_timeout | duration | 5m0s | amount of time after which a read attempt is considered timed out, which causes the backup to fail | Serverless/Dedicated/Self-Hosted |
bulkio.backup.read_with_priority_after | duration | 1m0s | amount of time since the read-as-of time above which a BACKUP should use priority when retrying reads | Serverless/Dedicated/Self-Hosted |
bulkio.stream_ingestion.minimum_flush_interval | duration | 5s | the minimum timestamp between flushes; flushes may still occur if internal buffers fill up | Serverless/Dedicated/Self-Hosted |
+changefeed.backfill.concurrent_scan_requests | integer | 0 | number of concurrent scan requests per node issued during a backfill | Serverless/Dedicated/Self-Hosted |
changefeed.backfill.scan_request_size | integer | 524288 | the maximum number of bytes returned by each scan request | Serverless/Dedicated/Self-Hosted |
changefeed.balance_range_distribution.enable | boolean | false | if enabled, the ranges are balanced equally among all nodes | Serverless/Dedicated/Self-Hosted |
changefeed.batch_reduction_retry_enabled | boolean | false | if true, kafka changefeeds upon erroring on an oversized batch will attempt to resend the messages with progressively lower batch sizes | Serverless/Dedicated/Self-Hosted |
changefeed.event_consumer_worker_queue_size | integer | 16 | if changefeed.event_consumer_workers is enabled, this setting sets the maximum number of events which a worker can buffer | Serverless/Dedicated/Self-Hosted |
changefeed.event_consumer_workers | integer | 0 | the number of workers to use when processing events: <0 disables, 0 assigns a reasonable default, >0 assigns the setting value. for experimental/core changefeeds and changefeeds using parquet format, this is disabled | Serverless/Dedicated/Self-Hosted |
changefeed.fast_gzip.enabled | boolean | true | use fast gzip implementation | Serverless/Dedicated/Self-Hosted |
+changefeed.frontier_highwater_lag_checkpoint_threshold | duration | 10m0s | controls the maximum the high-water mark is allowed to lag behind the leading spans of the frontier before per-span checkpointing is enabled; if 0, checkpointing due to high-water lag is disabled | Serverless/Dedicated/Self-Hosted |
+changefeed.memory.per_changefeed_limit | byte size | 512 MiB | controls amount of data that can be buffered per changefeed | Serverless/Dedicated/Self-Hosted |
+changefeed.min_highwater_advance | duration | 0s | minimum amount of time the changefeed high water mark must advance for it to be eligible for checkpointing; Default of 0 will checkpoint every time frontier advances, as long as the rate of checkpointing keeps up with the rate of frontier changes | Serverless/Dedicated/Self-Hosted |
changefeed.node_throttle_config | string | | specifies node level throttling configuration for all changefeeds | Serverless/Dedicated/Self-Hosted |
changefeed.protect_timestamp.max_age | duration | 96h0m0s | fail the changefeed if the protected timestamp age exceeds this threshold; 0 disables expiration | Serverless/Dedicated/Self-Hosted |
+changefeed.protect_timestamp_interval | duration | 10m0s | controls how often the changefeed forwards its protected timestamp to the resolved timestamp | Serverless/Dedicated/Self-Hosted |
changefeed.schema_feed.read_with_priority_after | duration | 1m0s | retry with high priority if we were not able to read descriptors for too long; 0 disables | Serverless/Dedicated/Self-Hosted |
changefeed.sink_io_workers | integer | 0 | the number of workers used by changefeeds when sending requests to the sink (currently webhook only): <0 disables, 0 assigns a reasonable default, >0 assigns the setting value | Serverless/Dedicated/Self-Hosted |
cloudstorage.azure.concurrent_upload_buffers | integer | 1 | controls the number of concurrent buffers that will be used by the Azure client when uploading chunks. Each buffer can buffer up to cloudstorage.write_chunk.size of memory during an upload | Serverless/Dedicated/Self-Hosted |
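Marking these settings public only changes their visibility in the generated docs; they are adjusted like any other cluster setting. A minimal sketch in Go of setting one of them, assuming the lib/pq driver and the hypothetical local connection string shown below:

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/lib/pq" // Postgres-wire driver; CockroachDB speaks the same protocol.
)

func main() {
	// Hypothetical connection string; adjust host, port, user, and TLS options for a real cluster.
	db, err := sql.Open("postgres", "postgresql://root@localhost:26257/defaultdb?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// The value 4 is illustrative only; the default listed in the table above is 0.
	if _, err := db.Exec(
		"SET CLUSTER SETTING changefeed.backfill.concurrent_scan_requests = 4",
	); err != nil {
		log.Fatal(err)
	}
}
```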
diff --git a/pkg/ccl/changefeedccl/changefeedbase/settings.go b/pkg/ccl/changefeedccl/changefeedbase/settings.go
index e17f1a2d1f34..1458bcd9b148 100644
--- a/pkg/ccl/changefeedccl/changefeedbase/settings.go
+++ b/pkg/ccl/changefeedccl/changefeedbase/settings.go
@@ -49,7 +49,7 @@ var PerChangefeedMemLimit = settings.RegisterByteSizeSetting(
"changefeed.memory.per_changefeed_limit",
"controls amount of data that can be buffered per changefeed",
1<<29, // 512MiB
-)
+).WithPublic()
// SlowSpanLogThreshold controls when we will log slow spans.
var SlowSpanLogThreshold = settings.RegisterDurationSetting(
@@ -88,7 +88,7 @@ var FrontierHighwaterLagCheckpointThreshold = settings.RegisterDurationSetting(
"controls the maximum the high-water mark is allowed to lag behind the leading spans of the frontier before per-span checkpointing is enabled; if 0, checkpointing due to high-water lag is disabled",
10*time.Minute,
settings.NonNegativeDuration,
-)
+).WithPublic()
// FrontierCheckpointMaxBytes controls the maximum number of key bytes that will be added
// to the checkpoint record.
@@ -119,7 +119,7 @@ var ScanRequestLimit = settings.RegisterIntSetting(
"changefeed.backfill.concurrent_scan_requests",
"number of concurrent scan requests per node issued during a backfill",
0,
-)
+).WithPublic()
// ScanRequestSize is the target size of the scan request response.
//
@@ -181,7 +181,7 @@ var MinHighWaterMarkCheckpointAdvance = settings.RegisterDurationSetting(
"advances, as long as the rate of checkpointing keeps up with the rate of frontier changes",
0,
settings.NonNegativeDuration,
-)
+).WithPublic()
// EventMemoryMultiplier is the multiplier for the amount of memory needed to process an event.
//
@@ -209,7 +209,7 @@ var ProtectTimestampInterval = settings.RegisterDurationSetting(
"controls how often the changefeed forwards its protected timestamp to the resolved timestamp",
10*time.Minute,
settings.PositiveDuration,
-)
+).WithPublic()
// MaxProtectedTimestampAge controls the frequency of protected timestamp record updates
var MaxProtectedTimestampAge = settings.RegisterDurationSetting(
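In the Go source the change is mechanical: each registration is left as-is and gains a trailing .WithPublic(), which is what surfaces the setting in the generated files above. A minimal sketch of the pattern, using a hypothetical setting name and assuming the settings-class argument taken by this version of the settings API:

```go
package changefeedbase

import (
	"time"

	"github.com/cockroachdb/cockroach/pkg/settings"
)

// ExampleInterval is a hypothetical setting, shown only to illustrate the
// pattern applied in this change: chaining WithPublic() on the registration
// marks the setting public so it appears in the generated settings docs.
var ExampleInterval = settings.RegisterDurationSetting(
	settings.TenantWritable,       // settings class; assumed to match the rest of this file
	"changefeed.example_interval", // hypothetical name, not a real setting
	"controls how often the example task runs",
	10*time.Minute,
	settings.NonNegativeDuration,
).WithPublic()
```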
diff --git a/pkg/cli/interactive_tests/test_demo_node_cmds.tcl b/pkg/cli/interactive_tests/test_demo_node_cmds.tcl
index 320f20c84e73..a82326dd40f7 100644
--- a/pkg/cli/interactive_tests/test_demo_node_cmds.tcl
+++ b/pkg/cli/interactive_tests/test_demo_node_cmds.tcl
@@ -67,31 +67,26 @@ send "\\demo restart 3\r"
eexpect "node 3 has been restarted"
eexpect "defaultdb>"
-# NB: this is flaky, sometimes n3 is still marked as draining due to
-# gossip propagation delays. See:
-# https://github.com/cockroachdb/cockroach/issues/76391
-# send "select node_id, draining, decommissioning, membership from crdb_internal.gossip_liveness ORDER BY node_id;\r"
-# eexpect "1 | false | false | active"
-# eexpect "2 | false | false | active"
-# eexpect "3 | false | false | active"
-# eexpect "4 | false | false | active"
-# eexpect "5 | false | false | active"
-# eexpect "defaultdb>"
-
-# Try commissioning commands
+send "select node_id, draining, decommissioning, membership from crdb_internal.gossip_liveness ORDER BY node_id;\r"
+eexpect "1 | f | f | active"
+eexpect "2 | f | f | active"
+eexpect "3 | f | f | active"
+eexpect "4 | f | f | active"
+eexpect "5 | f | f | active"
+eexpect "defaultdb>"
+
+# Try decommissioning commands
send "\\demo decommission 4\r"
eexpect "node 4 has been decommissioned"
eexpect "defaultdb>"
-# NB: skipping this out of an abundance of caution, see:
-# https://github.com/cockroachdb/cockroach/issues/76391
-# send "select node_id, draining, decommissioning, membership from crdb_internal.gossip_liveness ORDER BY node_id;\r"
-# eexpect "1 | false | false | active"
-# eexpect "2 | false | false | active"
-# eexpect "3 | false | false | active"
-# eexpect "4 | false | true | decommissioned"
-# eexpect "5 | false | false | active"
-# eexpect "defaultdb>"
+send "select node_id, draining, membership from crdb_internal.kv_node_liveness ORDER BY node_id;\r"
+eexpect "1 | f | active"
+eexpect "2 | f | active"
+eexpect "3 | f | active"
+eexpect "4 | f | decommissioned"
+eexpect "5 | f | active"
+eexpect "defaultdb>"
send "\\demo recommission 4\r"
eexpect "can only recommission a decommissioning node"
@@ -128,17 +123,16 @@ eexpect "node 6 has been shutdown"
set timeout 30
eexpect "defaultdb>"
-# By now the node should have stabilized in gossip which allows us to query the more detailed information there.
-# NB: skip this to avoid flakes, see:
-# https://github.com/cockroachdb/cockroach/issues/76391
-# send "select node_id, draining, decommissioning, membership from crdb_internal.gossip_liveness ORDER BY node_id;\r"
-# eexpect "1 | false | false | active"
-# eexpect "2 | false | false | active"
-# eexpect "3 | false | false | active"
-# eexpect "4 | false | true | decommissioned"
-# eexpect "5 | false | false | active"
-# eexpect "6 | true | false | active"
-# eexpect "defaultdb>"
+# NB: use kv_node_liveness to avoid flakes due to gossip delays.
+# See https://github.com/cockroachdb/cockroach/issues/76391
+send "select node_id, draining, membership from crdb_internal.kv_node_liveness ORDER BY node_id;\r"
+eexpect "1 | f | active"
+eexpect "2 | f | active"
+eexpect "3 | f | active"
+eexpect "4 | f | decommissioned"
+eexpect "5 | f | active"
+eexpect "6 | t | active"
+eexpect "defaultdb>"
send "\\q\r"
eexpect eof
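crdb_internal.kv_node_liveness reads liveness from the KV liveness records rather than from each node's gossiped view, so the assertions above no longer depend on gossip propagation after restarts and decommissions. A rough client-side equivalent of the query the test issues, sketched in Go (driver and connection string are assumptions):

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/lib/pq" // Postgres-wire driver; CockroachDB speaks the same protocol.
)

func main() {
	// Hypothetical connection string for a locally running demo cluster.
	db, err := sql.Open("postgres", "postgresql://root@localhost:26257/defaultdb?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Same query the test issues: liveness as recorded in KV, not in gossip.
	rows, err := db.Query(
		"SELECT node_id, draining, membership FROM crdb_internal.kv_node_liveness ORDER BY node_id",
	)
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()

	for rows.Next() {
		var nodeID int
		var draining bool
		var membership string
		if err := rows.Scan(&nodeID, &draining, &membership); err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%d | %t | %s\n", nodeID, draining, membership)
	}
	if err := rows.Err(); err != nil {
		log.Fatal(err)
	}
}
```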