From f79639e4df8bcb031aee88f0442230520c19cee8 Mon Sep 17 00:00:00 2001 From: mikeCRL Date: Fri, 12 May 2023 02:07:05 -0400 Subject: [PATCH 01/18] WIP 23.1.0 GA Release Notes --- _config_base.yml | 10 +- _config_cockroachdb.yml | 2 +- _data/releases.yml | 23 +- _data/versions.csv | 2 +- _includes/releases/v23.1/v23.1.0.md | 499 ++++++++++++++++++++++++++++ css/customstyles.scss | 9 + v23.1/upgrade-cockroach-version.md | 12 +- 7 files changed, 542 insertions(+), 15 deletions(-) create mode 100644 _includes/releases/v23.1/v23.1.0.md diff --git a/_config_base.yml b/_config_base.yml index 2a82537f6ac..cbc1cbb8121 100644 --- a/_config_base.yml +++ b/_config_base.yml @@ -152,12 +152,12 @@ release_info: start_time: 2023-05-03 12:20:58.860667 +0000 UTC version: v22.2.9 v23.1: - build_time: 2023-05-04 00:00:00 (go1.19) + build_time: 2023-05-15 00:00:00 (go1.19) crdb_branch_name: release-23.1 - docker_image: cockroachdb/cockroach-unstable - name: v23.1.0-rc.2 - start_time: 2023-05-03 16:33:03.563820 +0000 UTC - version: v23.1.0-rc.2 + docker_image: cockroachdb/cockroach + name: v23.1.0 + start_time: 2023-05-15 16:33:03.563820 +0000 UTC + version: v23.1.0 sass: quiet_deps: 'true' sass_dir: css diff --git a/_config_cockroachdb.yml b/_config_cockroachdb.yml index 2b0c265e775..d326713e65d 100644 --- a/_config_cockroachdb.yml +++ b/_config_cockroachdb.yml @@ -7,4 +7,4 @@ destination: _site/docs homepage_title: CockroachDB Docs versions: dev: v23.1 - stable: v22.2 + stable: v23.1 diff --git a/_data/releases.yml b/_data/releases.yml index 10bb7fd9c96..83ead5904e8 100644 --- a/_data/releases.yml +++ b/_data/releases.yml @@ -4314,8 +4314,6 @@ source: true previous_release: v22.2.8 - - - release_name: v22.1.20 major_version: v22.1 release_date: '2023-05-12' @@ -4336,3 +4334,24 @@ docker_arm: false source: true previous_release: v22.1.19 + +- release_name: v23.1.0 + major_version: v23.1 + release_date: '2023-05-15' + release_type: Production + go_version: go1.18 + sha: 
358e0d87912365b8976c55ab9b3292e999cf720d + has_sql_only: true + has_sha256sum: true + mac: + mac_arm: true + windows: true + linux: + linux_arm: true + linux_intel_fips: true + linux_arm_fips: false + docker: + docker_image: cockroachdb/cockroach + docker_arm: true + source: true + previous_release: v23.1.0-rc.2 diff --git a/_data/versions.csv b/_data/versions.csv index f97a0948734..702fcd59e76 100644 --- a/_data/versions.csv +++ b/_data/versions.csv @@ -11,4 +11,4 @@ v21.1,2021-05-18,2022-05-18,2022-11-18,v20.2 v21.2,2021-11-16,2022-11-16,2023-05-16,v21.1 v22.1,2022-05-24,2023-05-24,2023-11-24,v21.2 v22.2,2022-12-05,2023-12-05,2024-06-05,v22.1 -v23.1,N/A,N/A,N/A,v22.2 +v23.1,2023-05-10,2024-05-15,2024-11-15,v22.2 diff --git a/_includes/releases/v23.1/v23.1.0.md b/_includes/releases/v23.1/v23.1.0.md new file mode 100644 index 00000000000..0003239e24d --- /dev/null +++ b/_includes/releases/v23.1/v23.1.0.md @@ -0,0 +1,499 @@ +## v23.1.0 + +Release Date: May 15, 2023 + +With the release of CockroachDB v23.1, we've added new capabilities in CockroachDB to help you build, optimize, and operate more effectively and efficiently. Check out a [summary of the most significant user-facing changes](#v23-1-0-feature-highlights) and then [upgrade to CockroachDB v23.1](../v23.1/upgrade-cockroach-version.html). + +For demos and videos on the new features, see the [v23.1 launch page](https://www.cockroachlabs.com/23-1-launch). + +{% include releases/release-downloads-docker-image.md release=include.release %} + +

{{ site.data.products.db }}

+ +- Get a free v23.1 cluster on {{ site.data.products.serverless }}. +- Learn about recent updates to {{ site.data.products.db }} in the [{{ site.data.products.db }} Release Notes](cloud.html). + +

Feature highlights

+ +This section summarizes the most significant user-facing changes in v23.1.0. For a complete list of features and changes, including bug fixes and performance improvements, see the [release notes](index.html#testing-releases) for previous testing releases. You can also search for [what's new in v23.1 in our docs](../search.html?query=new%20in%20v23.1). + +{{site.data.alerts.callout_info}} +The features highlighted below are freely available in {{ site.data.products.core }} and do not require an [enterprise license](https://www.cockroachlabs.com/get-cockroachdb/enterprise/), unless otherwise noted. [{{ site.data.products.db }} clusters](https://cockroachlabs.cloud/) include all enterprise features. You can also use [`cockroach demo`](../v23.1/cockroach-demo.html) to test enterprise features in a local, temporary cluster. +{{site.data.alerts.end}} + +- [SQL](#v23-1-0-sql) +- [Security and compliance](#v23-1-0-security-and-compliance) +- [Recovery and I/O](#v23-1-0-recovery-and-io) +- [Database operations](#v23-1-0-database-operations) +- [Backward-incompatible changes](#v23-1-0-backward-incompatible-changes) +- [Deprecations](#v23-1-0-deprecations) +- [Known limitations](#v23-1-0-known-limitations) +- [Additional resources](#v23-1-0-additional-resources) + + + +

SQL

+ +
Queries
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FeatureDescription
Full-text search using TSVector and TSQuery +

A full-text search is used to perform queries on natural-language documents such as articles, websites, or other written formats, with results often sorted by relevance.

+

You can rely on new built-in functions to make use of the new TSVECTOR and TSQUERY data types.

+
Improved developer experience for multi-region apps +

If you have functionality that requires low latency and cannot tolerate delays between regions, you can enable the enforce_home_region option, which ensures that queries are executed within a single region. If a query doesn't have a home region or is running outside of its home region, the optimizer now provides improved feedback and suggestions for executing the query within a single region. +

Streamline migrations with improved COPY performance +

Enhancements to the functionality behind COPY statements resulted in 2x faster migrations using AWS DMS. +

Redact PII from statement bundles +

Leverage statement bundles for debugging without introducing data privacy concerns. You can now redact personally identifiable information (PII) from statement bundles for PCI compliance. +

User-Defined Function (UDF) enhancements +

User-defined functions offer enhanced flexibility, performance, and reusability. This release brings a number of UDF enhancements, including: Inlining of supported UDFs within the query plan to improve performance; support for subqueries in statements, support for expressions with a * such as SELECT * , and support for returning a set of results (using SETOF). +

+UDFs can now also be used in Changefeed expressions (Enterprise) and CHECK constraints, and referenced from other objects. Validations have been added to guarantee that all statements in the function body should be as strict as the expected UDF volatility. UDFs are also now included in backup and restore operations. +

DELETE FROM ... USING +

We have added support for the USING clause on DELETE, which allows joining multiple tables for a DELETE clause. This change is in line with PostgreSQL functionality and extends the flexibility of DELETE. +

+ +
Schemas
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
FeatureDescription
Support user-defined composite types +

You can now create your own composite data types, in addition to our previous support for enumerated data types. +

+For example: +

Declarative schema changer supports user-defined functions UDFs +

The statements CREATE FUNCTION and DROP FUNCTION are now supported by the declarative schema changer. +

Declarative schema changer fully supports constraints +

The statements ALTER TABLE...ADD CONSTRAINT and ALTER TABLE...DROP CONSTRAINT are now supported by the declarative schema changer.

Add configurable setting to adjust grant lease options +

A new session variable allow_role_memberships_to_change_during_transaction has been introduced which, when true, will make granting and revoking of role memberships faster at the cost of allowing some in-progress transactions to observe the previous role membership. +

+By default, when granting or revoking a role from another role, the system waits until all transactions that are consulting the current set of role memberships to complete. This is done to preserve CockroachDB’s default isolation level. However, the downside of this wait is that grant and revoke will take longer than the longest currently executing transaction. +

+In some cases, you may not care about whether concurrent transactions will immediately see the side-effects of the role grant or revoke operation, but would instead prefer that the operation finish quickly. +

+For more information about this setting and how it works, see the Limitations section of the GRANT documentation. +

+ +
Sessions
+ + + + + + + + + + + + + + + + + + + + + + +
FeatureDescription
New SQL shell editor with tab completion +

The SQL shell has a new user interface that allows tab completion and more advanced navigation, streamlining developer workflows. +

+After pressing tab, you can navigate database objects, keywords, and functions using arrow keys, pressing tab again to select one and return to the console. You can also use pattern matching to filter these entities and find what you need faster. +

Support multiple active portals (Preview) + +

The multiple active portals feature of the Postgres wire protocol (pgwire) is available, with limitations. This allows for more efficient data retrieval by reducing the number of roundtrips required between the client and server. +

+Third-party tools such as asyncpg use this feature to implement efficient asynchronous communication between the PostgreSQL server and client. This can allow for faster, more scalable applications that can handle large amounts of data without slowing down the user experience. +

Full support for asyncpg +

CockroachDB now offers full support for asyncpg, a PostgreSQL database interface library designed specifically for Python's asyncio framework. It provides an efficient implementation of the PostgreSQL server binary protocol for high-performance asynchronous database applications. +

+Asyncpg is commonly used with ORM libraries such as SQLAlchemy to provide a simple, flexible, and efficient adapter for working with PostgreSQL databases. This makes it ideal for handling large volumes of data and scaling applications to meet demanding performance requirements. +

+ +

Security & Compliance

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FeatureDescription
Improvements to the redaction of data in observability artifacts +

We have made a number of improvements to ensure that sensitive data can be redacted in observability artifacts produced by CockroachDB, such as debug.zip and statement bundles. These improvements, available to all Self-Hosted customers, also help Cockroach Labs to comply with PCI DSS in CockroachDB Dedicated. +

FIPS-ready CockroachDB binaries +

FIPS-ready binaries and Docker images are available for CockroachDB 23.1.0 and above. Federal Information Processing Standards (FIPS) 140-2 is a standard used to approve cryptographic modules by the U.S. and Canadian governments for systems maintained by relevant agencies and organizations working with them for the purposes of encrypting and decrypting sensitive data. +

FIPS-ready CockroachDB binaries are designed for workloads that require FIPS 140-2. FIPS-ready CockroachDB delegates cryptographic operations to the OpenSSL library available on the host operating system, rather than Go's cryptographic libraries. We recommend that OpenSSL has a FIPS 140-2 certificate. FIPS mode must be enabled in the Linux kernel to ensure that FIPS 140-2 is enforced by the operating system. +

Support OAuth authentication protocol for changefeeds +

See this item in Change Data Capture (Changefeeds). +

Support encrypted backups with keys stored in Azure Key Vault +

You can now take and restore encrypted backups using RSA keys stored in Azure Key Vault. +

Expand on External Connections for Changefeeds +

See the Changefeeds section for more information. +

New fine-grained system privilege to view all jobs +

The new VIEWJOB system privilege allows a user to view all jobs when running commands like SHOW JOBS, without granting additional capabilities. This helps ensure alignment with the principle of least privilege by removing the need to assign broader privileges or role options like CONTROLJOB. +

+ +

IO & Recovery

+ +
Change data capture (Changefeeds)
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FeatureDescription
Create scheduled exports using changefeeds +

Changefeeds can offer benefits over existing export functionality for customers who need higher levels of scale and observability. You can now create changefeed exports on a schedule, similar to the scheduling feature for backups. This extends our existing Changefeeds as Export functionality. +

Use a webhook as a changefeed sink +

The use of a webhook sink to deliver changefeed messages to an arbitrary HTTPS endpoint has been promoted from Preview to GA. +

Add parquet format to changefeeds +

The Parquet format offers efficient compression and processing of large data exports, further extending the capabilities of Changefeeds as Exports. +

CDC Queries +

CDC Queries (Formerly CDC Transformations) have been promoted from Preview to GA. +

Use External Connections (GA) to remove a data exfiltration vector +

Use external connections to specify and interact with resources that are external from CockroachDB. With CREATE EXTERNAL CONNECTION, you define a name for an external connection while passing the provider URI and query parameters. BACKUP, RESTORE, IMPORT, EXPORT, and CREATE CHANGEFEED queries can interact with the defined external connection instead of a required, provider-specific URI. As a result, you can decouple the management and permissions of the external resource from the operation in which you're using them. +

+With the move from Preview to GA, this feature brings many new capabilities, such as fine-grained permission and support for schema registries, webhook and GC PubSub sinks, and the SHOW command. +

Changefeed locality +

CREATE CHANGEFEED now accepts a 'WITH execution_locality' option to restrict execution of the changefeed process to nodes within the specified locality filter. +

Improved changefeed resilience +

Changefeeds are more stable as the result of improved error handling. Changefeeds now default to retrying requests when encountering any error, except those deemed terminal. +

Support OAuth authentication protocol for changefeeds +

OAuth authentication uses a third-party software provider to authenticate with Kafka instead of providing CockroachDB with direct access to Kafka cluster credentials. The third-party authentication server provides a temporary credential token that CockroachDB then uses to connect to a customer’s Kafka cluster. This represents a security best practice, allowing users to authenticate without directly storing or sharing their credentials. +

+ +
Disaster Recovery
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
FeatureDescription
Backup/Restore: Restrict to a specific set of nodes, or locality +

You can now restrict backup execution to a specific region/locality. Only the nodes in the specified locality filter will execute the backup, and will need access to the backup storage bucket. +

+If the node executing the backup does not have a specific range to be backed up, it will read it from the closest replica it can. +

Support encrypted backups with keys stored in Azure Key Vault +

You can now take and restore encrypted backups using RSA keys stored in Azure Key Vault. +

Support longer incremental backup chains +

We now support up to 400 incremental backups, an 8x increase, enabling you to preserve your data even more effectively (reducing RPO) while being more cost-efficient. Incremental backups contain only the data that has changed since the last backup, so they are smaller and faster to produce. +

Enforce supported backup versions +

To help ensure backups and restores are successful, CockroachDB now enforces its previous support for restoring backups from up to two minor versions prior. Previously, restoring backups produced from even earlier versions was possible, but unreliable. Now, this operation is prevented with an error. +

+ +

Database Operations

+ +
Observability
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FeatureDescription
Key Visualizer (Preview) +

Using a visual heatmap of the latest historical range activity across the cluster, you can quickly identify hot spots and full table scans, enabling you to target ranges for performance investigations. +

Enhanced Intelligent Insights experience includes transaction-level insights and metrics +

We have expanded the Insights section of the Console, offering improved discoverability and data for tuning and optimizing your workload. Contention insights reveal the waiting statement for cases where blocking conflicts occur. Transaction-level insights help you identify impacted areas of your application and prioritize your investigations, enabling you to drill down to individual statements with suboptimal plans. +

Enhanced statement metrics such as: Latency profiles, CPU, MVCC garbage statistics, and Idle/client time are captured per statement +

Easily correlate high-level cluster metrics (e.g., CPU, latency, etc.) with CPU utilization, latency metrics (P50, P90, P99, min, max), and MVCC garbage statistics per statement. +

Faster performance and an enhanced UX for the SQL Activity pages +

Reliably and quickly find SQL activity information using a new interactive ‘Search Criteria’ capability in the console. +

Correlate common troubleshooting user flows with additional observability information about indexes used per statement. +

Observability information is available to correlate statements (and their plans) to indexes. Users can map index usage statistics with statements and transactions which streamlines troubleshooting user flows such as dropping infrequently used indexes, creating or updating table statistics, reducing MVCC garbage, and alleviating resource hot spots. +

+ +
KV Layer
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
FeatureDescription
Decommission Pre-Flight Checks +

Decommissioning operations now check that each replica on a node that is slated to be decommissioned can be moved to another node. +

+Any ranges that are not yet fully upreplicated will block the decommission process. +

+When errors are detected that would prevent successful decommissioning, the errors are printed to STDERR and the decommissioning command exits.

Delegated Snapshots: Send Raft snapshots between follower replicas +

Delegated Snapshots make multi-region deployments more cost-efficient by decreasing the use of remote snapshots if there is a local snapshot with the data. +

+Sending data locally reduces your network costs and frees up the WAN bandwidth for the important data that must be transferred.

+Previously, customers with multi-site deployments paid more for network bandwidth than strictly necessary due to system operations that required snapshots. Additionally, because of congested TCP channels between regions, transferring snapshots could negatively impact user traffic and slow snapshot transfers. +

+Delegated Snapshots fixes this problem by sending snapshots from a local replica whenever a close-by replica exists and isn’t busy. +

Faster leaseholder recovery +

The default CockroachDB lease duration has been reduced from 9 seconds to 6 seconds, to reduce range unavailability following leaseholder loss. Some other related settings have also had their defaults reduced, including heartbeat intervals, Raft election timeouts, and network timeouts. +

Lower default TTL for garbage collection (GC) +

The default GC TTL value in 23.1.0 is being reduced from 25 hrs to 4 hrs for new clusters. +

+This change is being made to improve read performance, storage utilization, and cluster stability in high write traffic scenarios. +Scheduled backups will not be affected by this change as protected timestamps will ensure data isn't garbage-collected until it has been backed up. Changefeeds will also not be affected. If you want a 25-hour or larger GC TTL value (for example, to support AS OF SYSTEM TIME queries that go further back in time), you can explicitly set GC TTL to the desired value. +

+This change will only apply to new clusters. Existing clusters will retain the 25-hour default when upgrading, unless you have previously overridden it with an explicit value, in which case, that value will be retained. Backups taken from clusters running versions prior to v23.1 will similarly retain the GC TTL configured when the backup was taken. +

+ +

Backward-incompatible changes

+ +Before [upgrading to CockroachDB v23.1](../v23.1/upgrade-cockroach-version.html), be sure to review the following backward-incompatible changes, as well as key cluster setting changes, and adjust your deployment as necessary. + +- Replaced the `cdc_prev()` [function](../v23.1/functions-and-operators.html) in favor of a `cdc_prev` tuple. This is an incompatible change that may break [changefeeds](../v23.1/change-data-capture-overview.html) that use the previous `cdc_prev()` function. [#85177][#85177] +- [`SHOW RANGES FOR TABLE`](../v23.1/show-ranges.html) now includes rows for all indexes that support the table. Prior to this change, `SHOW RANGES FOR TABLE foo` was an alias for `SHOW RANGES FOR INDEX foo@primary`. This was causing confusion, as it would miss data for secondary indexes. It is still possible to filter to just the primary index using `SHOW RANGES FOR INDEX foo@primary`. The statement output now also includes the index name. [#93545][#93545] +- CockroachDB now supports sharing storage ranges across multiple indexes/tables. As a result, there is no longer a guarantee that there is at most one SQL object (e.g., table/index/sequence/materialized view) per storage range. Therefore, the columns `table_id`, `database_name`, `schema_name`, `table_name` and `index_name` in `crdb_internal.ranges` and `.ranges_no_leases` have become nonsensical: a range cannot be attributed to a single table/index anymore. As a result: + + - The aforementioned columns in the `crdb_internal` virtual tables have been removed. Existing code can use the [`SHOW RANGES`](../v23.1/show-ranges.html) statement instead, optionally using `WITH KEYS` to expose the raw start/end keys. + - `SHOW RANGES FROM DATABASE` continues to report one row per range, but stops returning the database / schema / table / index name. + - `SHOW RANGES FROM TABLE` continues to report one row per range, but stops returning the index name. 
Suggested replacements: + - Instead of: `SELECT range_id FROM crdb_internal.ranges WHERE table_name = 'x'`, use: `SELECT range_id FROM [SHOW RANGES FROM TABLE x]` + - Instead of `SELECT range_id FROM crdb_internal.ranges WHERE table_name = $1 OR table_id = $2` (variable / unpredictable table name or ID), use: `SELECT range_id FROM [SHOW RANGES FROM CURRENT_CATALOG WITH TABLES] WHERE table_name = $1 OR table_id = $2` + - Instead of `SELECT start_key FROM crdb_internal.ranges WHERE table_name = 'x'`, use: `SELECT raw_start_key FROM [SHOW RANGES FROM TABLE x WITH KEYS]` + - Instead of `SELECT start_key FROM crdb_internal.ranges WHERE table_name = $1 OR table_id = $2` (unpredictable / variable table name or ID), use: `SELECT raw_start_key FROM [SHOW RANGES FROM CURRENT_CATALOG WITH TABLES, KEYS] WHERE table_name = $1 OR table_id = $2` [#93644][#93644] +- The format of the columns `start_key` and `end_key` for `SHOW RANGES FROM DATABASE` and `SHOW RANGES FROM TABLE` have been extended to include which table/index the key belongs to. This is necessary because a range can now contain data from more than one table/index. [#93644][#93644] +- The format of the columns `start_key` and `end_key` for `SHOW RANGE ... FOR ROW` has been changed to be consistent with the output of `SHOW RANGES FROM INDEX`. [#93644][#93644] +- The output of [`SHOW RANGES`](../v23.1/show-ranges.html) no longer includes `range_size`, `range_size_mb`, `lease_holder`, or `lease_holder_localities` by default. This ensures that `SHOW RANGES` remains fast in the common case. Use the new option `WITH DETAILS` to include these columns. [#93644][#93644] +- If a SQL database is created with a name that starts with `cluster:...` (e.g., `CREATE DATABASE "cluster:foo"`, clients will no longer be able to connect to it directly via a pre-existing URL connection string. The URL will need to be modified in this case. 
For example: + - Previously: `postgres://servername/cluster:foo`; now: `postgres://servername/cluster:foo&options=-ccluster=system` + - This syntax selects the tenant named `system` and then the database named `cluster:foo` inside it. When the `-ccluster:system` option is not specified, `cluster:foo` in the database name position is interpreted as a request to connect to a tenant named `foo`, which likely does not exist. Connections to databases whose name does not start with `cluster:` (the most common case) are not affected by this change. [#92580][#92580] + +- Changefeeds using "preview" expressions (released in v23.1.0) and that access the previous state of the row using the `cdc_prev()` function will no longer work and will need to be recreated with new syntax. [#94429][#94429] +- Fixed a bug where, when `server.identity_map.configuration` was used, CockroachDB did not verify the client-provided username against the target mappings. Note that **this means that the client must now provide a valid DB username.** This requirement is compatible with PostgreSQL; it was not previously required by CockroachDB but it is now. This does not apply when identity maps are not in use. [#94915][#94915] +- Previously, the type of the `replicas`, `voting_replicas`,`non_voting_replicas` and `learner_replicas` in `crdb_internal.ranges` were overridden to `INT2VECTOR` causing incompatible indexing between `.ranges` and `.ranges_no_leases`. Now the types of those columns in the two tables are set to `INT[]`. [#96287][#96287] +- The output of the [`SHOW RANGES`](../v23.1/show-ranges.html) command for the `crdb_internal.ranges` and `crdb_internal.ranges_no_leases` tables has been updated, and the previous output is deprecated. To enable the new command output, set the `sql.show_ranges_deprecated_behavior.enabled` [cluster setting](../v23.1/cluster-settings.html) to `false`. The new output will become default in v23.2. 
[#99618][#99618] +- Previously, if a user specified a [`search_path`](../v23.1/sql-name-resolution.html#current-schema) in the connection string parameters, it would always be treated as case sensitive. Now, in order to have the schema names in the `search_path` respect case, the user must include double quotes around the name. [#101492][#101492] + +

Key Cluster Setting Changes

+ +| Category | Description | Change Type | Backport version | +|---|---|---|---| +| SQL language change | The `backup.restore_span.target_size` cluster setting now defaults to `384 MiB `. This should reduce the number of ranges created during [restore](../v23.1/restore.html) and thereby reduce the merging of ranges that needs to occur after the restore. [#89333](https://github.com/cockroachdb/cockroach/pull/89333) | Changed default | v22.2.1 | +| SQL language change | The [cluster setting](../v23.1/cluster-settings.html) `sql.ttl.default_range_concurrency` and table storage parameter `ttl_range_concurrency` are no longer configurable. [#89392](https://github.com/cockroachdb/cockroach/pull/89392) | No longer configurable | v22.2.1 | +| SQL language change | The `sql.distsql.max_running_flows` [cluster setting](../v23.1/cluster-settings.html) has been removed. [#84888](https://github.com/cockroachdb/cockroach/pull/84888) | Removed | None | +| Operational change | The [cluster settings](../v23.1/cluster-settings.html) `server.web_session.purge.period` and `server.web_session.purge.max_deletions_per_cycle`, which were specific to the cleanup function for `system.web_sessions`, have been replaced by `server.log_gc.period` and `server.log_gc.max_deletions_per_cycle` which apply to the cleanup function for `system.eventlog`, `system.rangelog` and `system.web_sessions` equally. [#90789](https://github.com/cockroachdb/cockroach/pull/90789) | Removed, repurposed | None | +| Operational change | The [cluster setting](../v23.1/cluster-settings.html) `server.web_session.auto_logout.timeout` has been removed. [#90789](https://github.com/cockroachdb/cockroach/pull/90789) | Removed, defaults to true | None | +| {{ site.data.products.enterprise }} edition change | The `changefeed.active_protected_timestamps.enabled` [[cluster setting](../v23.1/cluster-settings.html)](../v23.1/cluster-settings.html) has been removed and is now always treated as if it was `true`. 
[#89975](https://github.com/cockroachdb/cockroach/pull/89975) | Changed default | None | +| {{ site.data.products.enterprise }} edition change | Increased the default `changefeed.memory.per_changefeed_limit` [cluster setting](../v23.1/cluster-settings.html) from `128MiB` to `512MiB`. This should result in changefeeds being able to produce larger files. [#96340](https://github.com/cockroachdb/cockroach/pull/96340) | Changed default | None | +| Operational change | The [load-based splitter](https://www.cockroachlabs.com/docs/v23.1/load-based-splitting) now supports using request CPU usage to split ranges. This is introduced with the previous cluster setting `kv.allocator.load_based_rebalancing.objective`, which when set to `cpu`, will use request CPU usage. The threshold above which CPU usage of a range is considered for splitting is defined in the cluster setting `kv.range_split.load_cpu_threshold`, which has a default value of `250ms`. (Relates to #100211 in this table.) [#96128](https://github.com/cockroachdb/cockroach/pull/96128) | Repurposed | None | +| Operational change | The `kv.range_split.load_cpu_threshold` [cluster setting](../v23.1/cluster-settings.html#setting-kv-range-split-load-cpu-threshold) now has a minimum setting value of `10ms`. Previously there was no minimum so, while unlikely, this could have an impact if you had chosen a custom setting lower than the established minimum. [#98250](https://github.com/cockroachdb/cockroach/pull/98250) | New minimum | None | +| Security update | The new [cluster setting](/docs/v23.1/cluster-settings.html) `server.user_login.downgrade_scram_stored_passwords_to_bcrypt.enabled`, which allows you to migrate passwords from SCRAM to bcrypt during user authentication, defaults to `true`. If it is `true` and if `server.user_login.password_encryption` is `crdb-bcrypt`, then during login, the stored hashed password will be migrated from SCRAM to bcrypt. 
[#97429](https://github.com/cockroachdb/cockroach/pull/97429) | New setting | v22.2.6 | +| Security update | The default value for the `server.user_login.password_hashes.default_cost.scram_sha_256` [cluster setting](../v23.1/cluster-settings.html) is now 10610. (Previously the default was 119680.) The old value was found to have been too high for many types of client hardware, and in some cases could cause regressions in connection latency. The new value was chosen by running tests with clients that have 1 or 2 vCPUs provisioned. Additionally, the new cluster setting `server.user_login.rehash_scram_stored_passwords_on_cost_change.enabled` was added, and defaults to `true`. If it is `true` and the stored SCRAM password for a user has a different cost than the configured default cost, then the next time the user logs in, their password will automatically be rehashed using the configured default cost. If the rehashing is not desired, then operators should update the `server.user_login.password_hashes.default_cost.scram_sha_256` cluster setting to the value they desire before upgrading. This change is being backported to [v22.2](../releases/v22.2.html). [#98254](https://github.com/cockroachdb/cockroach/pull/98254) | Changed default | v22.2.7 | +| Command-line change | The `--drain-wait` argument to the [`cockroach node drain`](../v23.1/cockroach-node.html) command will be automatically increased if the command detects that it is smaller than the sum of the [cluster settings](../v23.1/node-shutdown.html#cluster-settings) `server.shutdown.drain_wait`, `server.shutdown.connection_wait`, `server.shutdown.query_wait` times two, and `server.shutdown.lease_transfer_wait`. If the `--drain-wait` argument is 0, then no timeout is used. This recommendation [was already documented](../v23.1/node-shutdown.html#drain-timeout), but now the advice will be applied automatically. 
[#98390](https://github.com/cockroachdb/cockroach/pull/98390) | New effect | v22.2.1 | +| Bug fix | RPC connections between nodes now require RPC connections to be established in both directions, otherwise the connection will be closed. This is done to prevent asymmetric network partitions where nodes are able to send outbound messages but not receive inbound messages, which could result in persistent unavailability. This behavior can be disabled by the [cluster setting](../v23.1/cluster-settings.html) `rpc.dialback.enabled`. [#94778](https://github.com/cockroachdb/cockroach/pull/94778) | New setting, enabled by default | None | +| Bug fix | Fixed a rare bug introduced in v22.2.0 that could cause a node to crash with an `attempting to append refresh spans after the tracked timestamp has moved forward` error when querying virtual tables in the [`crdb_internal`](../v23.1/crdb-internal.html) or [`pg_catalog`](../v23.1/pg-catalog.html) system catalogs. If you are experiencing this bug, set the `sql.distsql.use_streamer.enabled` [cluster setting](../v23.1/cluster-settings.html) to `false` before upgrading a cluster to v23.1. [#99443](https://github.com/cockroachdb/cockroach/pull/99443) | New guidance | v22.2.8 | +| Bug fix | The [**Hot Ranges** page](../v23.1/ui-hot-ranges-page.html) DB Console page would show hot ranges by CPU and not QPS (queries per second), depending on the value of the `kv.allocator.load_based_rebalancing.objective` [cluster setting](../v23.1/cluster-settings.html) (default `cpu`). Now the page will always collect statistics based on QPS. (Relates to #96128 in this table.) [#100211](https://github.com/cockroachdb/cockroach/pull/100211) | Repurposed setting | No | + +

Deprecations

+ +tk + +

Known limitations

+ +For information about new and unresolved limitations in CockroachDB v23.1, with suggested workarounds where applicable, see [Known Limitations](../v23.1/known-limitations.html). + +

Additional resources

+ +Resource | Topic | Description +---------------------+--------------------------------------------+------------- +Cockroach University | [Getting Started with SQL for Application Developers](https://university.cockroachlabs.com/courses/course-v1:crl+getting-started-with-sql-for-app-devs+self-paced/about) | In this course, you will learn some basic, single-table, SQL operations. Starting from a business use case, you will learn how to translate a simple entity/object into a corresponding database table. From there, you will see how you can populate that table with data and retrieve it afterward. By the end of the course, you should feel comfortable with taking your own simple entities, mapping them to your relational database, and performing basic queries. +Cockroach University | [Modeling Object Relationships in SQL](https://university.cockroachlabs.com/courses/course-v1:crl+modeling-object-relationships-in-sql+preview/about)
(Preview) | In this course, you will learn to map your business critical data from your application code to a SQL database efficiently and elegantly, and learn key SQL features to help minimize application complexity. +Cockroach University | [Getting Started with Node.js and node-postgres](https://university.cockroachlabs.com/courses/course-v1:crl+getting-started-with-nodejs-node-postgres+self-paced/about) | In this course, you will learn how to properly use CockroachDB inside of a simple microservice. You will start with a pre-built microservice and add the necessary components to communicate with the database using the node-postgres driver. +Cockroach University | [Intro to Multi-Region Databases in Geo-distributed Applications](https://university.cockroachlabs.com/courses/course-v1:crl+intro-to-multi-region+v1/about)
(Preview) | This course will introduce simple, elegant, and practical solutions for designing a database that optimizes for resilience, responsiveness while also being sensitive to data locality. +Docs | [Migration Overview](../v23.1/migration-overview.html) | This page summarizes the steps of migrating a database to CockroachDB, which include testing and updating your schema to work with CockroachDB, moving your data into CockroachDB, and testing and updating your application. +Docs | [Unsupported Features in CockroachDB Serverless](../cockroachcloud/serverless-unsupported-features.html) | This page describes the features that are either unsupported or partially supported in CockroachDB serverless clusters +Docs | [Sample apps with ccloud](../v23.1/build-a-nodejs-app-with-cockroachdb.html?filters=ccloud) | Sample application docs now includes steps to create a {{ site.data.products.serverless }} cluster using the `ccloud` CLI tool. +Docs | [API Support Policy](../v23.1/api-support-policy.html) | This page includes the following information: our API support policies, our definitions of backward-incompatible and backward-compatible changes, and a summary of APIs that CockroachDB makes available. +Docs | [CockroachDB Kubernetes Operator release notes](../releases/kubernetes-operator.html) | The CockroachDB Kubernetes Operator-specific release notes are now surfaced on this page. +Docs | [HashiCorp Vault tutorial](../v23.1/vault-db-secrets-tutorial.html) | This pages reviews the supported integrations between CockroachDB and HashiCorp's Vault, which offers tooling to extend CockroachDB's data security capabilities. +Docs | [Backup architecture](../v23.1/backup-architecture.html) | This page describes the backup job workflow with a high-level overview, diagrams, and more details on each phase of the job. 
diff --git a/css/customstyles.scss b/css/customstyles.scss index 856a0cff205..f8a3d401cb9 100755 --- a/css/customstyles.scss +++ b/css/customstyles.scss @@ -101,4 +101,13 @@ table { width: 100%; display: block; border: none; + tbody { + tr { + td { + strong { + line-height: 22px; + } + } + } + } } diff --git a/v23.1/upgrade-cockroach-version.md b/v23.1/upgrade-cockroach-version.md index 9a7950909d4..50cde4a687b 100644 --- a/v23.1/upgrade-cockroach-version.md +++ b/v23.1/upgrade-cockroach-version.md @@ -63,6 +63,12 @@ When you are ready to upgrade to {{ latest.release_name }}, continue to [step 2] Before starting the upgrade, complete the following steps. +### Review breaking changes + +{% assign rd = site.data.versions | where_exp: "rd", "rd.major_version == page.version.version" | first %} + +Review the [backward-incompatible changes](../releases/{{ page.version.version }}.html{% unless rd.release_date == "N/A" or rd.release_date > today %}#{{ page.version.version | replace: ".", "-" }}-0-backward-incompatible-changes{% endunless %}), [deprecated features](../releases/{{ page.version.version }}.html#{% unless rd.release_date == "N/A" or rd.release_date > today %}{{ page.version.version | replace: ".", "-" }}-0-deprecations{% endunless %}), and [key cluster setting changes](../releases/{{ page.version.version }}.html#{% unless rd.release_date == "N/A" or rd.release_date > today %}{{ page.version.version | replace: ".", "-" }}-0-cluster-settings{% endunless %}) in {{ page.version.version }}. If any affect your deployment, make the necessary changes before starting the rolling upgrade to {{ page.version.version }}. + ### Check load balancing Make sure your cluster is behind a [load balancer](recommended-production-settings.html#load-balancing), or your clients are configured to talk to multiple nodes. If your application communicates with a single node, stopping that node to upgrade its CockroachDB binary will cause your application to fail. 
@@ -99,12 +105,6 @@ If your cluster contains partially-decommissioned nodes, they will block an upgr 1. First, reissue the [decommission command](node-shutdown.html?filters=decommission#decommission-the-node). The second command typically succeeds within a few minutes. 1. If the second decommission command does not succeed, [recommission](node-shutdown.html?filters=decommission#recommission-nodes) and then decommission it again. Before continuing the upgrade, the node must be marked as `decommissioned`. -### Review breaking changes - -{% assign rd = site.data.versions | where_exp: "rd", "rd.major_version == page.version.version" | first %} - -Review the [backward-incompatible changes in {{ page.version.version }}](../releases/{{ page.version.version }}.html{% unless rd.release_date == "N/A" or rd.release_date > today %}#{{ page.version.version | replace: ".", "-" }}-0-backward-incompatible-changes{% endunless %}) and [deprecated features](../releases/{{ page.version.version }}.html#{% unless rd.release_date == "N/A" or rd.release_date > today %}{{ page.version.version | replace: ".", "-" }}-0-deprecations{% endunless %}). If any affect your deployment, make the necessary changes before starting the rolling upgrade to {{ page.version.version }}. - ## Step 3. Decide how the upgrade will be finalized {{site.data.alerts.callout_info}} From 8c88fe9f68f7981092307e64824f632ab9825a8e Mon Sep 17 00:00:00 2001 From: mikeCRL Date: Fri, 12 May 2023 02:21:57 -0400 Subject: [PATCH 02/18] Fix headings and anchors --- _includes/releases/v23.1/v23.1.0.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/_includes/releases/v23.1/v23.1.0.md b/_includes/releases/v23.1/v23.1.0.md index 0003239e24d..88f5712dea7 100644 --- a/_includes/releases/v23.1/v23.1.0.md +++ b/_includes/releases/v23.1/v23.1.0.md @@ -175,7 +175,7 @@ Asyncpg is commonly used with ORM libraries such as SQLAlchemy to provide a simp -

Security & Compliance

+

Security and compliance

@@ -225,7 +225,7 @@ Asyncpg is commonly used with ORM libraries such as SQLAlchemy to provide a simp
-

IO & Recovery

+

Recovery and I/O

Change data capture (Changefeeds)
From ec133074cc99507718f08fe41dfc6d8f950948e0 Mon Sep 17 00:00:00 2001 From: mikeCRL Date: Fri, 12 May 2023 02:26:52 -0400 Subject: [PATCH 03/18] Fix code sample --- _includes/releases/v23.1/v23.1.0.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/_includes/releases/v23.1/v23.1.0.md b/_includes/releases/v23.1/v23.1.0.md index 88f5712dea7..2c97db85af0 100644 --- a/_includes/releases/v23.1/v23.1.0.md +++ b/_includes/releases/v23.1/v23.1.0.md @@ -108,7 +108,11 @@ UDFs can now also be used in Changefeed expressions (Enterprise) and CHECK

You can now create your own composite data types, in addition to our previous support for enumerated data types.

-For example: +For example:

+CREATE TYPE t AS (a INT, b INT);
+CREATE TABLE a (a t);
+INSERT INTO a VALUES((1, 2));
+SELECT (a).b FROM a;
From eb2741c596e96b155d3eb0b8d5d8829d0214af55 Mon Sep 17 00:00:00 2001 From: mikeCRL Date: Fri, 12 May 2023 07:53:15 -0400 Subject: [PATCH 04/18] Disaster Recovery PM updates --- _includes/releases/v23.1/v23.1.0.md | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/_includes/releases/v23.1/v23.1.0.md b/_includes/releases/v23.1/v23.1.0.md index 2c97db85af0..02cc5e2803b 100644 --- a/_includes/releases/v23.1/v23.1.0.md +++ b/_includes/releases/v23.1/v23.1.0.md @@ -305,23 +305,31 @@ With the move from Preview to GA, this feature brings many new capabilities, suc - Backup/Restore: Restrict to a specific set of nodes, or locality + Support longer incremental backup chains + +

We now support up to 400 incremental backups, an 8x increase, enabling you to preserve your data even more effectively (reducing RPO) while being more cost-efficient. Incremental backups contain only the data that has changed since the last backup, so they are smaller and faster to produce. -

You can now restrict backup execution to a specific region/locality. Only the nodes in the specified locality filter will execute the backup, and will need access to the backup storage bucket. + + + Restrict backups to a locality + +

You can now restrict backup execution to a specific region/locality. Only the nodes in the specified locality filter will execute the backup, so only those nodes will need access to the backup storage bucket.

If the node executing the backup does not have a specific range to be backed up, it will read it from the closest replica it can. - Support encrypted backups with keys stored in Azure Key Vault + Support implicit authentication on Azure -

You can now take and restore encrypted backups using RSA keys stored in Azure Key Vault. +

You can now use implicit authentication on Azure for cloud storage authentication. +

+If the node executing the backup does not have a specific range to be backed up, it will read it from the closest replica it can. - + - Support longer incremental backup chains + Support encrypted backups with keys stored in Azure Key Vault -

We now support up to 400 incremental backups, an 8x increase, enabling you to preserve your data even more effectively (reducing RPO) while being more cost-efficient. Incremental backups contain only the data that has changed since the last backup, so they are smaller and faster to produce. +

You can now take and restore encrypted backups using RSA keys stored in Azure Key Vault. From 75f67ea381662caf12dd1ab3abe679ff88fd1460 Mon Sep 17 00:00:00 2001 From: mikeCRL Date: Fri, 12 May 2023 09:04:23 -0400 Subject: [PATCH 05/18] Cluster settings context, etc --- _includes/releases/v23.1/v23.1.0.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/_includes/releases/v23.1/v23.1.0.md b/_includes/releases/v23.1/v23.1.0.md index 02cc5e2803b..3b292e2dc66 100644 --- a/_includes/releases/v23.1/v23.1.0.md +++ b/_includes/releases/v23.1/v23.1.0.md @@ -118,13 +118,13 @@ SELECT (a).b FROM a Declarative schema changer supports user-defined functions UDFs -

The statements CREATE FUNCTION and DROP FUNCTION are now supported by the declarative schema changer. +

The statements CREATE FUNCTION and DROP FUNCTION are now supported by the declarative schema changer. Declarative schema changer fully supports constraints -

The statements ALTER TABLE...ADD CONSTRAINT and ALTER TABLE...DROP CONSTRAINT are now supported by the declarative schema changer. +

The statements ALTER TABLE...ADD CONSTRAINT and ALTER TABLE...DROP CONSTRAINT are now supported by the declarative schema changer. Add configurable setting to adjust grant lease options @@ -468,6 +468,8 @@ Before [upgrading to CockroachDB v23.1](../v23.1/upgrade-cockroach-version.html)

Key Cluster Setting Changes

+The following changes should be reviewed prior to upgrading. Default cluster settings will be used unless you have manually set a value for a setting. This can be confirmed by checking the `system.settings` table (`select * from system.settings`) to view the non-default settings. + | Category | Description | Change Type | Backport version | |---|---|---|---| | SQL language change | The `backup.restore_span.target_size` cluster setting now defaults to `384 MiB `. This should reduce the number of ranges created during [restore](../v23.1/restore.html) and thereby reduce the merging of ranges that needs to occur after the restore. [#89333](https://github.com/cockroachdb/cockroach/pull/89333) | Changed default | v22.2.1 | From eda0bfa64a45897040cf59b9ed4448c2bfb4d8cb Mon Sep 17 00:00:00 2001 From: Nick Vigilante Date: Fri, 12 May 2023 11:15:41 -0400 Subject: [PATCH 06/18] fixes to includes/Liquid --- _includes/releases/v23.1/v23.1.0-alpha.4.md | 2 +- .../v23.1/performance/sql-trace-txn-enable-threshold.md | 7 ++++++- _includes/v23.1/ui/insights.md | 4 ++++ cockroachcloud/insights-page.md | 2 +- cockroachcloud/metrics-page.md | 2 +- 5 files changed, 13 insertions(+), 4 deletions(-) diff --git a/_includes/releases/v23.1/v23.1.0-alpha.4.md b/_includes/releases/v23.1/v23.1.0-alpha.4.md index a606479e2a5..c56db517ea4 100644 --- a/_includes/releases/v23.1/v23.1.0-alpha.4.md +++ b/_includes/releases/v23.1/v23.1.0-alpha.4.md @@ -9,7 +9,7 @@ Release Date: February 27, 2023 - Some of the transformations specific to changefeeds have been deprecated and replaced. These functions were released in limited access in v22.2. Deprecated changefeed transformations continue to function. Closely monitor changefeeds that are created during upgrade. While effort was made to maintain backward compatibility, the updated changefeed transformation may produce slightly different output, such as different column names. 
[#96295][#96295] - Add support for implicit authentication to Azure Storage and KMS. [#96825][#96825] - Add support for `CREATE EXTERNAL CONNECTION ... AS "postgresql://"` or `"postgres://"`. These external connections may be specified as the source in streaming replication. [#96551][#96551] -- Add support for referencing [user defined functions (UDFs)](../{{site.versions["stable"]}}/user-defined-functions.html) from other objects. Backup and restore operations can now read and write UDF descriptors. [#97038][#97038] +- Add support for referencing [user defined functions (UDFs)](../v23.1/user-defined-functions.html) from other objects. Backup and restore operations can now read and write UDF descriptors. [#97038][#97038]

SQL language changes

diff --git a/_includes/v23.1/performance/sql-trace-txn-enable-threshold.md b/_includes/v23.1/performance/sql-trace-txn-enable-threshold.md index 723f9075d85..effcda2bdbd 100644 --- a/_includes/v23.1/performance/sql-trace-txn-enable-threshold.md +++ b/_includes/v23.1/performance/sql-trace-txn-enable-threshold.md @@ -1 +1,6 @@ -The default tracing behavior captures a small percent of transactions, so not all contention events will be recorded. When investigating transaction contention, you can set the `sql.trace.txn.enable_threshold` [cluster setting](cluster-settings.html#setting-sql-trace-txn-enable-threshold) to always capture contention events. \ No newline at end of file +{% if include.version_prefix != nil %} + {% assign url = include.version_prefix | append: "cluster-settings.html#setting-sql-trace-txn-enable-threshold" | absolute_url %} +{% else %} + {% assign url = "cluster-settings.html#setting-sql-trace-txn-enable-threshold" %} +{% endif %} +The default tracing behavior captures a small percent of transactions, so not all contention events will be recorded. When investigating transaction contention, you can set the `sql.trace.txn.enable_threshold` [cluster setting]({{ url }}) to always capture contention events. diff --git a/_includes/v23.1/ui/insights.md b/_includes/v23.1/ui/insights.md index ad84fe59643..900eafa92a0 100644 --- a/_includes/v23.1/ui/insights.md +++ b/_includes/v23.1/ui/insights.md @@ -84,7 +84,11 @@ To display this view, click **Insights** in the left-hand navigation of the Clou The rows in this page are populated from the [`crdb_internal.cluster_execution_insights`]({{ link_prefix }}crdb-internal.html) table. - The results displayed on the **Statement Executions** view will be available as long as the number of rows in each node is less than the [`sql.insights.execution_insights_capacity` cluster setting]({{ link_prefix }}cluster-settings.html#setting-sql-insights-execution-insights-capacity). 
+{% if include.version_prefix != nil %} +- {% include {{ include.version_prefix }}performance/sql-trace-txn-enable-threshold.md version_prefix=version_prefix %} +{% else %} - {% include {{ page.version.version }}/performance/sql-trace-txn-enable-threshold.md %} +{% endif %} {{site.data.alerts.end}} {% if page.cloud != true -%} diff --git a/cockroachcloud/insights-page.md b/cockroachcloud/insights-page.md index e8da3c02b9e..bc9c7062207 100644 --- a/cockroachcloud/insights-page.md +++ b/cockroachcloud/insights-page.md @@ -23,7 +23,7 @@ The **Insights** page of the {{ site.data.products.db }} Console helps you: To view this page, select a cluster from the [**Clusters** page](cluster-management.html#view-clusters-page), and click **Insights** in the **Monitoring** section of the left side navigation. -{% include {{version_prefix}}ui/insights.md %} +{% include {{version_prefix}}ui/insights.md version_prefix=version_prefix %} ## See also diff --git a/cockroachcloud/metrics-page.md b/cockroachcloud/metrics-page.md index eec3d4a0e8e..0b22b62c203 100644 --- a/cockroachcloud/metrics-page.md +++ b/cockroachcloud/metrics-page.md @@ -72,7 +72,7 @@ This graph shows a moving average of the number of statements with [full table a ### SQL Statement Contention -This graph shows a moving average of the number of SQL statements that experienced [contention](../{{site.versions["stable"]}}/transactions.html#transaction-contention) across the cluster. +This graph shows a moving average of the number of SQL statements that experienced [contention](../{{site.versions["stable"]}}/performance-best-practices-overview.html#transaction-contention) across the cluster. See the [Statements page](statements-page.html) for more details on the cluster's SQL statements. 
From fb610d30d94df6514a50362cca48f1cec5679de2 Mon Sep 17 00:00:00 2001 From: shannonbradshaw Date: Fri, 12 May 2023 12:10:53 -0400 Subject: [PATCH 07/18] Cockroach University updates for additional resources section --- _includes/releases/v23.1/v23.1.0.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/_includes/releases/v23.1/v23.1.0.md b/_includes/releases/v23.1/v23.1.0.md index 3b292e2dc66..b4e389d4d20 100644 --- a/_includes/releases/v23.1/v23.1.0.md +++ b/_includes/releases/v23.1/v23.1.0.md @@ -500,10 +500,10 @@ For information about new and unresolved limitations in CockroachDB v23.1, with Resource | Topic | Description ---------------------+--------------------------------------------+------------- -Cockroach University | [Getting Started with SQL for Application Developers](https://university.cockroachlabs.com/courses/course-v1:crl+getting-started-with-sql-for-app-devs+self-paced/about) | In this course, you will learn some basic, single-table, SQL operations. Starting from a business use case, you will learn how to translate a simple entity/object into a corresponding database table. From there, you will see how you can populate that table with data and retrieve it afterward. By the end of the course, you should feel comfortable with taking your own simple entities, mapping them to your relational database, and performing basic queries. -Cockroach University | [Modeling Object Relationships in SQL](https://university.cockroachlabs.com/courses/course-v1:crl+modeling-object-relationships-in-sql+preview/about)
(Preview) | In this course, you will learn to map your business critical data from your application code to a SQL database efficiently and elegantly, and learn key SQL features to help minimize application complexity. -Cockroach University | [Getting Started with Node.js and node-postgres](https://university.cockroachlabs.com/courses/course-v1:crl+getting-started-with-nodejs-node-postgres+self-paced/about) | In this course, you will learn how to properly use CockroachDB inside of a simple microservice. You will start with a pre-built microservice and add the necessary components to communicate with the database using the node-postgres driver. -Cockroach University | [Intro to Multi-Region Databases in Geo-distributed Applications](https://university.cockroachlabs.com/courses/course-v1:crl+intro-to-multi-region+v1/about)
(Preview) | This course will introduce simple, elegant, and practical solutions for designing a database that optimizes for resilience, responsiveness while also being sensitive to data locality. +Cockroach University | [Introduction to Distributed SQL and CockroachDB](https://university.cockroachlabs.com/courses/course-v1:crl+intro-to-distributed-sql-and-cockroachdb+self-paced/about) | This course introduces the core concepts behind distributed SQL databases and describes how CockroachDB fits into this landscape. You will learn what differentiates CockroachDB from both legacy SQL and NoSQL databases and how CockroachDB ensures consistent transactions without sacrificing scale and resiliency. You'll learn about CockroachDB's seamless horizontal scalability, distributed transactions with strict ACID guarantees, and high availability and resilience. +Cockroach University | [Practical First Steps with CockroachDB](https://university.cockroachlabs.com/courses/course-v1:crl+practical-first-steps-with-crdb+self-paced/about) | This course will give you the tools you need to get started with CockroachDB. During the course, you will learn how to spin up a cluster, use the Admin UI to monitor cluster activity, and use SQL shell to solve a set of hands-on exercises. +Cockroach University | [Building a Highly Resilient Multi-region Database using CockroachDB](https://university.cockroachlabs.com/courses/course-v1:crl+intro-to-resilience-in-multi-region+self-paced/about) | This course is part of a series introducing solutions to running low-latency, highly resilient applications for data-intensive workloads on CockroachDB. In this course we focus on surviving large-scale infrastructure failures like losing an entire cloud region without losing data during recovery. We’ll show you how to use CockroachDB survival goals in a multi-region cluster to implement a highly resilient database that survives node or network failures across multiple regions with zero data loss. 
+Cockroach University | [Introduction to Serverless Databases and CockroachDB Serverless](https://university.cockroachlabs.com/courses/course-v1:crl+intro-to-serverless+self-paced/about) | This course introduces the core concepts behind serverless databases and gives you the tools you need to get started with CockroachDB Serverless. You will learn how serverless databases remove the burden of configuring, sizing, provisioning, securing, maintaining and dynamically scaling your database based on load. This means you simply pay for the serverless database resources you use. Docs | [Migration Overview](../v23.1/migration-overview.html) | This page summarizes the steps of migrating a database to CockroachDB, which include testing and updating your schema to work with CockroachDB, moving your data into CockroachDB, and testing and updating your application. Docs | [Unsupported Features in CockroachDB Serverless](../cockroachcloud/serverless-unsupported-features.html) | This page describes the features that are either unsupported or partially supported in CockroachDB serverless clusters Docs | [Sample apps with ccloud](../v23.1/build-a-nodejs-app-with-cockroachdb.html?filters=ccloud) | Sample application docs now includes steps to create a {{ site.data.products.serverless }} cluster using the `ccloud` CLI tool. From 0c233ede1af9f9ee69a69ac6172e46b57219a743 Mon Sep 17 00:00:00 2001 From: ianjevans Date: Fri, 12 May 2023 15:12:25 -0700 Subject: [PATCH 08/18] Add link to asyncpg docs; clean up URLs --- _includes/releases/v23.1/v23.1.0.md | 38 ++++++++++++++--------------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/_includes/releases/v23.1/v23.1.0.md b/_includes/releases/v23.1/v23.1.0.md index b4e389d4d20..9b3c8b7584b 100644 --- a/_includes/releases/v23.1/v23.1.0.md +++ b/_includes/releases/v23.1/v23.1.0.md @@ -54,14 +54,14 @@ The features highlighted below are freely available in {{ site.data.products.cor Full-text search using TSVector and TSQuery -

A full-text search is used to perform queries on natural-language documents such as articles, websites, or other written formats, with results often sorted by relevance.

+

A full-text search is used to perform queries on natural-language documents such as articles, websites, or other written formats, with results often sorted by relevance.

You can rely on new built-in functions to make use of the new TSVECTOR and TSQUERY data types.

Improved developer experience for multi-region apps -

If you have functionality that requires low latency and cannot tolerate delays between regions, you can enable the enforce_home_region option, which ensures that queries are executed within a single region. If a query doesn't have a home region or is running outside of its home region, the optimizer now provides improved feedback and suggestions for executing the query within a single region. +

If you have functionality that requires low latency and cannot tolerate delays between regions, you can enable the enforce_home_region option, which ensures that queries are executed within a single region. If a query doesn't have a home region or is running outside of its home region, the optimizer now provides improved feedback and suggestions for executing the query within a single region. @@ -73,13 +73,13 @@ The features highlighted below are freely available in {{ site.data.products.cor Redact PII from statement bundles -

Leverage statement bundles for debugging without introducing data privacy concerns. You can now redact personally identifiable information (PII) from statement bundles for PCI compliance. +

Leverage statement bundles for debugging without introducing data privacy concerns. You can now redact personally identifiable information (PII) from statement bundles for PCI compliance. User-Defined Function (UDF) enhancements -

User-defined functions offer enhanced flexibility, performance, and reusability. This release brings a number of UDF enhancements, including: Inlining of supported UDFs within the query plan to improve performance; support for subqueries in statements, support for expressions with a * such as SELECT * , and support for returning a set of results (using SETOF). +

User-defined functions offer enhanced flexibility, performance, and reusability. This release brings a number of UDF enhancements, including: Inlining of supported UDFs within the query plan to improve performance; support for subqueries in statements, support for expressions with a * such as SELECT * , and support for returning a set of results (using SETOF).

UDFs can now also be used in Changefeed expressions (Enterprise) and CHECK constraints, and referenced from other objects. Validations have been added to guarantee that all statements in the function body should be as strict as the expected UDF volatility. UDFs are also now included in backup and restore operations. @@ -106,7 +106,7 @@ UDFs can now also be used in Changefeed expressions (Enterprise) and CHECK Support user-defined composite types -

You can now create your own composite data types, in addition to our previous support for enumerated data types. +

You can now create your own composite data types, in addition to our previous support for enumerated data types.

For example:

 CREATE TYPE t AS (a INT, b INT);
@@ -118,13 +118,13 @@ SELECT (a).b FROM a
Declarative schema changer supports user-defined functions UDFs -

The statements CREATE FUNCTION and DROP FUNCTION are now supported by the declarative schema changer. +

The statements CREATE FUNCTION and DROP FUNCTION are now supported by the declarative schema changer. Declarative schema changer fully supports constraints -

The statements ALTER TABLE...ADD CONSTRAINT and ALTER TABLE...DROP CONSTRAINT are now supported by the declarative schema changer. +

The statements ALTER TABLE...ADD CONSTRAINT and ALTER TABLE...DROP CONSTRAINT are now supported by the declarative schema changer. Add configurable setting to adjust grant lease options @@ -154,7 +154,7 @@ For more information about this setting and how it works, see the Limita New SQL shell editor with tab completion -

The SQL shell has a new user interface that allows tab completion and more advanced navigation, streamlining developer workflows. +

The SQL shell has a new user interface that allows tab completion and more advanced navigation, streamlining developer workflows.

After pressing tab, you can navigate database objects, keywords, and functions using arrow keys, pressing tab again to select one and return to the console. You can also use pattern matching to filter these entities and find what you need faster. @@ -163,9 +163,9 @@ After pressing tab, you can navigate database objects, keywords, and functions u Support multiple active portals (Preview) -

The multiple active portals feature of the Postgres wire protocol (pgwire) is available, with limitations. This allows for more efficient data retrieval by reducing the number of roundtrips required between the client and server. +

The multiple active portals feature of the Postgres wire protocol (pgwire) is available, with limitations. This allows for more efficient data retrieval by reducing the number of roundtrips required between the client and server.

-Third-party tools such as asyncpg use this feature to implement efficient asynchronous communication between the PostgreSQL server and client. This can allow for faster, more scalable applications that can handle large amounts of data without slowing down the user experience. +Third-party tools such as asyncpg use this feature to implement efficient asynchronous communication between the PostgreSQL server and client. This can allow for faster, more scalable applications that can handle large amounts of data without slowing down the user experience. @@ -211,7 +211,7 @@ Asyncpg is commonly used with ORM libraries such as SQLAlchemy to provide a simp Support encrypted backups with keys stored in Azure Key Vault -

You can now take and restore encrypted backups using RSA keys stored in Azure Key Vault. +

You can now take and restore encrypted backups using RSA keys stored in Azure Key Vault. @@ -223,7 +223,7 @@ Asyncpg is commonly used with ORM libraries such as SQLAlchemy to provide a simp New fine-grained system privilege to view all jobs -

The new VIEWJOB system privilege allows a user to view all jobs when running commands like SHOW JOBS, without granting additional capabilities. This helps ensure alignment with the principle of least privilege by removing the need to assign broader privileges or role options like CONTROLJOB. +

The new VIEWJOB system privilege allows a user to view all jobs when running commands like SHOW JOBS, without granting additional capabilities. This helps ensure alignment with the principle of least privilege by removing the need to assign broader privileges or role options like CONTROLJOB. @@ -244,13 +244,13 @@ Asyncpg is commonly used with ORM libraries such as SQLAlchemy to provide a simp Create scheduled exports using changefeeds -

Changefeeds can offer benefits over existing export functionality for customers who need higher levels of scale and observability. You can now create changefeed exports on a schedule, similar to the scheduling feature for backups. This extends our existing Changefeeds as Export functionality. +

Changefeeds can offer benefits over existing export functionality for customers who need higher levels of scale and observability. You can now create changefeed exports on a schedule, similar to the scheduling feature for backups. This extends our existing Changefeeds as Export functionality. Use a webhook as a changefeed sink -

The use of a webhook sink to deliver changefeed messages to an arbitrary HTTPS endpoint has been promoted from Preview to GA. +

The use of a webhook sink to deliver changefeed messages to an arbitrary HTTPS endpoint has been promoted from Preview to GA. @@ -262,13 +262,13 @@ Asyncpg is commonly used with ORM libraries such as SQLAlchemy to provide a simp CDC Queries -

CDC Queries (Formerly CDC Transformations) have been promoted from Preview to GA. +

CDC Queries (Formerly CDC Transformations) have been promoted from Preview to GA. Use External Connections (GA) to remove a data exfiltration vector -

Use external connections to specify and interact with resources that are external from CockroachDB. With CREATE EXTERNAL CONNECTION, you define a name for an external connection while passing the provider URI and query parameters. BACKUP, RESTORE, IMPORT, EXPORT, and CREATE CHANGEFEED queries can interact with the defined external connection instead of a required, provider-specific URI. As a result, you can decouple the management and permissions of the external resource from the operation in which you're using them. +

Use external connections to specify and interact with resources that are external from CockroachDB. With CREATE EXTERNAL CONNECTION, you define a name for an external connection while passing the provider URI and query parameters. BACKUP, RESTORE, IMPORT, EXPORT, and CREATE CHANGEFEED queries can interact with the defined external connection instead of a required, provider-specific URI. As a result, you can decouple the management and permissions of the external resource from the operation in which you're using them.

With the move from Preview to GA, this feature brings many new capabilities, such as fine-grained permission and support for schema registries, webhook and GC PubSub sinks, and the SHOW command. @@ -288,7 +288,7 @@ With the move from Preview to GA, this feature brings many new capabilities, suc Support Oauth authentication protocol for changefeeds -

Oauth authentication uses a third-party software provider to authenticate with Kafka instead of providing CockroachDB with direct access to Kafka cluster credentials. The third-party authentication server provides a temporary credential token that CockroachDB then uses to connect to a customer’s Kafka cluster. This represents a security best practice, allowing users to authenticate without directly storing or sharing their credentials. +

Oauth authentication uses a third-party software provider to authenticate with Kafka instead of providing CockroachDB with direct access to Kafka cluster credentials. The third-party authentication server provides a temporary credential token that CockroachDB then uses to connect to a customer’s Kafka cluster. This represents a security best practice, allowing users to authenticate without directly storing or sharing their credentials. @@ -321,7 +321,7 @@ If the node executing the backup does not have a specific range to be backed up, Support implicit authentication on Azure -

You can now use implicit authentication on Azure for cloud storage authentication. +

You can now use implicit authentication on Azure for cloud storage authentication.

If the node executing the backup does not have a specific range to be backed up, it will read it from the closest replica it can. @@ -329,7 +329,7 @@ If the node executing the backup does not have a specific range to be backed up, Support encrypted backups with keys stored in Azure Key Vault -

You can now take and restore encrypted backups using RSA keys stored in Azure Key Vault. +

You can now take and restore encrypted backups using RSA keys stored in Azure Key Vault. From 7ba4e107be60e6b348b170b0e6f8cec58a48d852 Mon Sep 17 00:00:00 2001 From: ianjevans Date: Fri, 12 May 2023 16:59:33 -0700 Subject: [PATCH 09/18] Fix link to asyncpg tutorial --- _includes/releases/v23.1/v23.1.0.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/_includes/releases/v23.1/v23.1.0.md b/_includes/releases/v23.1/v23.1.0.md index 9b3c8b7584b..004d7e8aeb8 100644 --- a/_includes/releases/v23.1/v23.1.0.md +++ b/_includes/releases/v23.1/v23.1.0.md @@ -165,7 +165,7 @@ After pressing tab, you can navigate database objects, keywords, and functions u

The multiple active portals feature of the Postgres wire protocol (pgwire) is available, with limitations. This allows for more efficient data retrieval by reducing the number of roundtrips required between the client and server.

-Third-party tools such as asyncpg use this feature to implement efficient asynchronous communication between the PostgreSQL server and client. This can allow for faster, more scalable applications that can handle large amounts of data without slowing down the user experience. +Third-party tools such as asyncpg use this feature to implement efficient asynchronous communication between the PostgreSQL server and client. This can allow for faster, more scalable applications that can handle large amounts of data without slowing down the user experience. From 6ce526ae57efebac89c975fe91d40403783146cc Mon Sep 17 00:00:00 2001 From: mikeCRL Date: Sun, 14 May 2023 14:55:42 -0400 Subject: [PATCH 10/18] Fixed table formatting and converted to relative links --- _includes/releases/v23.1/v23.1.0.md | 175 ++++++++++++++-------------- 1 file changed, 90 insertions(+), 85 deletions(-) diff --git a/_includes/releases/v23.1/v23.1.0.md b/_includes/releases/v23.1/v23.1.0.md index 004d7e8aeb8..523ef29862a 100644 --- a/_includes/releases/v23.1/v23.1.0.md +++ b/_includes/releases/v23.1/v23.1.0.md @@ -2,9 +2,9 @@ Release Date: May 15, 2023 -With the release of CockroachDB v23.1, we've added new capabilities in CockroachDB to help you build, optimize, and operate more effectively and efficiently. Check out a [summary of the most significant user-facing changes](#v23-1-0-feature-highlights) and then [upgrade to CockroachDB v23.1](../v23.1/upgrade-cockroach-version.html). +With the release of CockroachDB v23.1, we've added new capabilities in CockroachDB to help you migrate, build, and operate more efficiently. Check out a [summary of the most significant user-facing changes](#v23-1-0-feature-highlights) and then [upgrade to CockroachDB v23.1](../v23.1/upgrade-cockroach-version.html). -For demos and videos on the new features, see the [v23.1 launch page](https://www.cockroachlabs.com/23-1-launch). +To learn more about the launch, see the [v23.1 launch page](cockroachlabs.com/whatsnew). 
{% include releases/release-downloads-docker-image.md release=include.release %} @@ -18,7 +18,7 @@ For demos and videos on the new features, see the [v23.1 launch page](https://ww This section summarizes the most significant user-facing changes in v23.1.0. For a complete list of features and changes, including bug fixes and performance improvements, see the [release notes](index.html#testing-releases) for previous testing releases. You can also search for [what's new in v23.1 in our docs](../search.html?query=new%20in%20v23.1). {{site.data.alerts.callout_info}} -The features highlighted below are freely available in {{ site.data.products.core }} and do not require an [enterprise license](https://www.cockroachlabs.com/get-cockroachdb/enterprise/), unless otherwise noted. [{{ site.data.products.db }} clusters](https://cockroachlabs.cloud/) include all enterprise features. You can also use [`cockroach demo`](../v23.1/cockroach-demo.html) to test enterprise features in a local, temporary cluster. +The features highlighted below are freely available in {{ site.data.products.core }} and do not require an [enterprise license](../v23.1/cockroach-demo.html) to test enterprise features in a local, temporary cluster. {{site.data.alerts.end}} - [SQL](#v23-1-0-sql) @@ -52,42 +52,42 @@ The features highlighted below are freely available in {{ site.data.products.cor - Full-text search using TSVector and TSQuery +

Full-text search using TSVector and TSQuery

-

A full-text search is used to perform queries on natural-language documents such as articles, websites, or other written formats, with results often sorted by relevance.

-

You can rely on new built-in functions to make use of the new TSVECTOR and TSQUERY data types.

+

A full-text search is used to perform queries on natural-language documents such as articles, websites, or other written formats, with results often sorted by relevance.

+

You can rely on new built-in functions to make use of the new TSVECTOR and TSQUERY data types.

- Improved developer experience for multi-region apps +

Improved developer experience for multi-region apps

-

If you have functionality that requires low latency and cannot tolerate delays between regions, you can enable the enforce_home_region option, which ensures that queries are executed within a single region. If a query doesn't have a home region or is running outside of its home region, the optimizer now provides improved feedback and suggestions for executing the query within a single region. +

If you have functionality that requires low latency and cannot tolerate delays between regions, you can enable the enforce_home_region option, which ensures that queries are executed within a single region. If a query doesn't have a home region or is running outside of its home region, the optimizer now provides improved feedback and suggestions for executing the query within a single region. - Streamline migrations with improved COPY performance +

Streamline migrations with improved COPY performance

Enhancements to the functionality behind COPY statements resulted in 2x faster migrations using AWS DMS. - Redact PII from statement bundles +

Redact PII from statement bundles

-

Leverage statement bundles for debugging without introducing data privacy concerns. You can now redact personally identifiable information (PII) from statement bundles for PCI compliance. +

Leverage statement bundles for debugging without introducing data privacy concerns. You can now redact personally identifiable information (PII) from statement bundles for PCI compliance. - User-Defined Function (UDF) enhancements +

User-Defined Function (UDF) enhancements

-

User-defined functions offer enhanced flexibility, performance, and reusability. This release brings a number of UDF enhancements, including: Inlining of supported UDFs within the query plan to improve performance; support for subqueries in statements, support for expressions with a * such as SELECT * , and support for returning a set of results (using SETOF). +

User-defined functions offer enhanced flexibility, performance, and reusability. This release brings a number of UDF enhancements, including: Inlining of supported UDFs within the query plan to improve performance; support for subqueries in statements, support for expressions with a * such as SELECT * , and support for returning a set of results (using SETOF).

UDFs can now also be used in Changefeed expressions (Enterprise) and CHECK constraints, and referenced from other objects. Validations have been added to guarantee that all statements in the function body should be as strict as the expected UDF volatility. UDFs are also now included in backup and restore operations. - DELETE FROM ... USING +

DELETE FROM ... USING

-

We have added support for the USING clause on DELETE, which allows joining multiple tables for a DELETE clause. This change is in line with PostgreSQL functionality and extends the flexibility of DELETE. +

We have added support for the USING clause on DELETE, which allows joining multiple tables for a DELETE clause. This change is in line with PostgreSQL functionality and extends the flexibility of DELETE. @@ -104,9 +104,9 @@ UDFs can now also be used in Changefeed expressions (Enterprise) and CHECK - Support user-defined composite types +

Support user-defined composite types

-

You can now create your own composite data types, in addition to our previous support for enumerated data types. +

You can now create your own composite data types, in addition to our previous support for enumerated data types.

For example:

 CREATE TYPE t AS (a INT, b INT);
@@ -116,26 +116,26 @@ SELECT (a).b FROM a
- Declarative schema changer supports user-defined functions UDFs +

Declarative schema changer supports user-defined functions UDFs

-

The statements CREATE FUNCTION and DROP FUNCTION are now supported by the declarative schema changer. +

The statements CREATE FUNCTION and DROP FUNCTION are now supported by the declarative schema changer. - Declarative schema changer fully supports constraints +

Declarative schema changer fully supports constraints

-

The statements ALTER TABLE...ADD CONSTRAINT and ALTER TABLE...DROP CONSTRAINT are now supported by the declarative schema changer. +

The statements ALTER TABLE...ADD CONSTRAINT and ALTER TABLE...DROP CONSTRAINT are now supported by the declarative schema changer. - Add configurable setting to adjust grant lease options +

Add configurable setting to adjust grant lease options

-

A new session variable allow_role_memberships_to_change_during_transaction has been introduced which, when true, will make granting and revoking of role memberships faster at the cost of allowing some in-progress transactions to observe the previous role membership. +

A new session variable allow_role_memberships_to_change_during_transaction has been introduced which, when true, will make granting and revoking of role memberships faster at the cost of allowing some in-progress transactions to observe the previous role membership.

By default, when granting or revoking a role from another role, the system waits until all transactions that are consulting the current set of role memberships to complete. This is done to preserve CockroachDB’s default isolation level. However, the downside of this wait is that grant and revoke will take longer than the longest currently executing transaction.

In some cases, you may not care about whether concurrent transactions will immediately see the side-effects of the role grant or revoke operation, but would instead prefer that the operation finish quickly.

-For more information about this setting and how it works, see the Limitations section of the GRANT documentation. +For more information about this setting and how it works, see the

Limitations

section of the GRANT documentation. @@ -152,26 +152,26 @@ For more information about this setting and how it works, see the Limita - New SQL shell editor with tab completion +

New SQL shell editor with tab completion

-

The SQL shell has a new user interface that allows tab completion and more advanced navigation, streamlining developer workflows. +

The SQL shell has a new user interface that allows tab completion and more advanced navigation, streamlining developer workflows.

After pressing tab, you can navigate database objects, keywords, and functions using arrow keys, pressing tab again to select one and return to the console. You can also use pattern matching to filter these entities and find what you need faster. - Support multiple active portals (Preview) - +

Support multiple active portals (Preview) +

-

The multiple active portals feature of the Postgres wire protocol (pgwire) is available, with limitations. This allows for more efficient data retrieval by reducing the number of roundtrips required between the client and server. +

The multiple active portals feature of the Postgres wire protocol (pgwire) is available in CockroachDB, with limitations. This allows for more efficient data retrieval by reducing the number of roundtrips required between the client and server.

-Third-party tools such as asyncpg use this feature to implement efficient asynchronous communication between the PostgreSQL server and client. This can allow for faster, more scalable applications that can handle large amounts of data without slowing down the user experience. +Third-party tools such as asyncpg use this feature to implement efficient asynchronous communication between the PostgreSQL server and client. This can allow for faster, more scalable applications that can handle large amounts of data without slowing down the user experience. - Full support for asyncpg +

Full support for asyncpg

-

CockroachDB now offers full support for asyncpg, a PostgreSQL database interface library designed specifically for Python's asyncio framework. It provides an efficient implementation of the PostgreSQL server binary protocol for high-performance asynchronous database applications. +

CockroachDB now offers full support for asyncpg, a PostgreSQL database interface library designed specifically for Python's asyncio framework. It provides an efficient implementation of the PostgreSQL server binary protocol for high-performance asynchronous database applications.

Asyncpg is commonly used with ORM libraries such as SQLAlchemy to provide a simple, flexible, and efficient adapter for working with PostgreSQL databases. This makes it ideal for handling large volumes of data and scaling applications to meet demanding performance requirements. @@ -190,40 +190,40 @@ Asyncpg is commonly used with ORM libraries such as SQLAlchemy to provide a simp - Improvements to the redaction of data in observability artifacts +

Improvements to the redaction of data in observability artifacts

-

We have made a number of improvements to ensure that sensitive data can be redacted in observability artifacts produced by CockroachDB, such as debug.zip and statement bundles. These improvements, available to all Self-Hosted customers, also help Cockroach Labs to comply with PCI DSS in CockroachDB Dedicated. +

We have made a number of improvements to ensure that sensitive data can be redacted in observability artifacts produced by CockroachDB, such as debug.zip and statement bundles. These improvements, available to all Self-Hosted customers, also help Cockroach Labs to comply with PCI DSS in CockroachDB Dedicated. - FIPS-ready CockroachDB binaries +

FIPS-ready CockroachDB binaries

-

FIPS-ready binaries and Docker images are available for CockroachDB 23.1.0 and above. Federal Information Processing Standards (FIPS) 140-2 is a standard used to approve cryptographic modules by the U.S. and Canadian governments for systems maintained by relevant agencies and organizations working with them for the purposes of encrypting and decrypting sensitive data. +

FIPS-ready binaries and Docker images are available for CockroachDB 23.1.0 and above. Federal Information Processing Standards (FIPS) 140-2 is a standard used to approve cryptographic modules by the U.S. and Canadian governments for systems maintained by relevant agencies and organizations working with them for the purposes of encrypting and decrypting sensitive data.

FIPS-ready CockroachDB binaries are designed for workloads that require FIPS 140-2. FIPS-ready CockroachDB delegates cryptographic operations to the OpenSSL library available on the host operating system, rather than Go's cryptographic libraries. We recommend that OpenSSL has a FIPS 140-2 certificate. FIPS mode must be enabled in the Linux kernel to ensure that FIPS 140-2 is enforced by the operating system. - Support Oauth authentication protocol for changefeeds +

Support OAuth authentication protocol for changefeeds

See this item in Change Data Capture (Changefeeds). - Support encrypted backups with keys stored in Azure Key Vault +

Support encrypted backups with keys stored in Azure Key Vault

-

You can now take and restore encrypted backups using RSA keys stored in Azure Key Vault. +

You can now take and restore encrypted backups using RSA keys stored in Azure Key Vault. - Expand on External Connections for Changefeeds +

Expand on External Connections for Changefeeds

See the Changefeeds section for more information. - New fine-grained system privilege to view all jobs +

New fine-grained system privilege to view all jobs

-

The new VIEWJOB system privilege allows a user to view all jobs when running commands like SHOW JOBS, without granting additional capabilities. This helps ensure alignment with the principle of least privilege by removing the need to assign broader privileges or role options like CONTROLJOB. +

The new VIEWJOB system privilege allows a user to view all jobs when running commands like SHOW JOBS, without granting additional capabilities. This helps ensure alignment with the principle of least privilege by removing the need to assign broader privileges or role options like CONTROLJOB. @@ -242,53 +242,53 @@ Asyncpg is commonly used with ORM libraries such as SQLAlchemy to provide a simp - Create scheduled exports using changefeeds +

Create scheduled exports using changefeeds

-

Changefeeds can offer benefits over existing export functionality for customers who need higher levels of scale and observability. You can now create changefeed exports on a schedule, similar to the scheduling feature for backups. This extends our existing Changefeeds as Export functionality. +

Changefeeds can offer benefits over existing export functionality for customers who need higher levels of scale and observability. You can now create changefeed exports on a schedule, similar to the scheduling feature for backups. This extends our existing Changefeeds as Export functionality.

Use a webhook as a changefeed sink

-

The use of a webhook sink to deliver changefeed messages to an arbitrary HTTPS endpoint has been promoted from Preview to GA. +

The use of a webhook sink to deliver changefeed messages to an arbitrary HTTPS endpoint has been promoted from Preview to GA. - Add parquet format to changefeeds +

Add parquet format to changefeeds

The Parquet format offers efficient compression and processing of large data exports, further extending the capabilities of Changefeeds as Exports. - CDC Queries +

CDC Queries

-

CDC Queries (Formerly CDC Transformations) have been promoted from Preview to GA. +

CDC Queries (Formerly CDC Transformations) have been promoted from Preview to GA. - Use External Connections (GA) to remove a data exfiltration vector +

Use External Connections (GA) to remove a data exfiltration vector

-

Use external connections to specify and interact with resources that are external from CockroachDB. With CREATE EXTERNAL CONNECTION, you define a name for an external connection while passing the provider URI and query parameters. BACKUP, RESTORE, IMPORT, EXPORT, and CREATE CHANGEFEED queries can interact with the defined external connection instead of a required, provider-specific URI. As a result, you can decouple the management and permissions of the external resource from the operation in which you're using them. +

Use external connections to specify and interact with resources that are external from CockroachDB. With CREATE EXTERNAL CONNECTION, you define a name for an external connection while passing the provider URI and query parameters. BACKUP, RESTORE, IMPORT, EXPORT, and CREATE CHANGEFEED queries can interact with the defined external connection instead of a required, provider-specific URI. As a result, you can decouple the management and permissions of the external resource from the operation in which you're using them.

With the move from Preview to GA, this feature brings many new capabilities, such as fine-grained permission and support for schema registries, webhook and GC PubSub sinks, and the SHOW command. - Changefeed locality +

Changefeed locality

-

CREATE CHANGEFEED now accepts a 'WITH execution_locality' option to restrict execution of the changefeed process to nodes within the specified locality filter. +

CREATE CHANGEFEED now accepts a 'WITH execution_locality' option to restrict execution of the changefeed process to nodes within the specified locality filter. - Improved changefeed resilience +

Improved changefeed resilience

Changefeeds are more stable as the result of improved error handling. Changefeeds now default to retrying requests when encountering any error, except those deemed terminal. - Support Oauth authentication protocol for changefeeds +

Support OAuth authentication protocol for changefeeds

-

Oauth authentication uses a third-party software provider to authenticate with Kafka instead of providing CockroachDB with direct access to Kafka cluster credentials. The third-party authentication server provides a temporary credential token that CockroachDB then uses to connect to a customer’s Kafka cluster. This represents a security best practice, allowing users to authenticate without directly storing or sharing their credentials. +

OAuth authentication uses a third-party software provider to authenticate with Kafka instead of providing CockroachDB with direct access to Kafka cluster credentials. The third-party authentication server provides a temporary credential token that CockroachDB then uses to connect to a customer’s Kafka cluster. This represents a security best practice, allowing users to authenticate without directly storing or sharing their credentials.

Support longer incremental backup chains

We now support up to 400 incremental backups, an 8x increase, enabling you to preserve your data even more effectively (reducing RPO) while being more cost-efficient. Incremental backups contain only the data that has changed since the last backup, so they are smaller and faster to produce. - Restrict backups to a locality +

Restrict backups to a locality

You can now restrict backup execution to a specific region/locality. Only the nodes in the specified locality filter will execute the backup, so only those nodes will need access to the backup storage bucket.

@@ -319,21 +319,21 @@ If the node executing the backup does not have a specific range to be backed up, - Support implicit authentication on Azure +

Support implicit authentication on Azure

-

You can now use implicit authentication on Azure for cloud storage authentication. +

You can now use implicit authentication on Azure for cloud storage authentication.

If the node executing the backup does not have a specific range to be backed up, it will read it from the closest replica it can. - Support encrypted backups with keys stored in Azure Key Vault +

Support encrypted backups with keys stored in Azure Key Vault

-

You can now take and restore encrypted backups using RSA keys stored in Azure Key Vault. +

You can now take and restore encrypted backups using RSA keys stored in Azure Key Vault. - Enforce supported backup versions +

Enforce supported backup versions

To help ensure backups and restores are successful, CockroachDB now enforces its previous support for restoring backups from up to two minor versions prior. Previously, restoring backups produced from even earlier versions was possible, but unreliable. Now, this operation is prevented with an error. @@ -354,33 +354,33 @@ If the node executing the backup does not have a specific range to be backed up, - Key Visualizer (Preview) +

Key Visualizer (Preview)

Using a visual heatmap of the latest historical range activity across the cluster, you can quickly identify hot spots and full table scans, enabling you to target ranges for performance investigations. - Enhanced Intelligent Insights experience includes transaction-level insights and metrics +

Enhanced Intelligent Insights experience includes transaction-level insights and metrics

-

We have expanded the Insights section of the Console, offering improved discoverability and data for tuning and optimizing your workload. Contention insights reveal the waiting statement for cases where blocking conflicts occur. Transaction-level insights help you identify impacted areas of your application and prioritize your investigations, enabling you to drill down to individual statements with suboptimal plans. +

We have expanded the [Insights](../v23.1/ui-insights-page.html#transaction-executions-view) section of the Console, offering improved discoverability and data for tuning and optimizing your workload. Contention insights reveal the waiting statement for cases where blocking conflicts occur. Transaction-level insights help you identify impacted areas of your application and prioritize your investigations, enabling you to drill down to individual statements with suboptimal plans. - Enhanced statement metrics such as: Latency profiles, CPU, MVCC garbage statistics, and Idle/client time is capture per statement +

Enhanced statement metrics

-

Easily correlate high-level cluster metrics (e.g., CPU, latency, etc.) with CPU utilization, latency metrics (P50, P90, P99, min, max), and MVCC garbage statistics per statement. +

Easily correlate high-level cluster metrics (e.g., CPU, latency, etc.) with per-statement CPU utilization, latency metrics (P50, P90, P99, min, max), idle/client time, and MVCC garbage statistics. - Faster performance and an enhanced UX for the SQL Activity pages +

Faster performance and an enhanced UX for the SQL Activity pages

Reliably and quickly find SQL activity information using a new interactive ‘Search Criteria’ capability in the console. - Correlate common troubleshooting user flows with additional observability information into indexes used per statement. +

Supplement troubleshooting flows with additional observability into indexes used per statement

-

Observability information is available to correlate statements (and their plans) to indexes. Users can map index usage statistics with statements and transactions which streamlines troubleshooting user flows such as dropping infrequently used indexes, creating or updating table statistics, reducing MVCC garbage, and alleviating resource hot spots. +

Observability information is available to correlate statements (and their plans) to indexes. You can now map index usage statistics with statements and transactions, which streamlines troubleshooting flows such as dropping infrequently used indexes, creating or updating table statistics, reducing MVCC garbage, and alleviating resource hot spots. @@ -397,9 +397,9 @@ If the node executing the backup does not have a specific range to be backed up, - Decommission Pre-Flight Checks +

Decommission Pre-Flight Checks

-

Decommissioning operations now check that each replica on a node that is slated to be decommissioned can be moved to another node. +

Decommissioning operations now check that each replica on a node that is slated to be decommissioned can be moved to another node.

Any ranges that are not yet fully upreplicated will block the decommission process.

@@ -407,9 +407,9 @@ When errors are detected that would prevent successful decommissioning,, the err - Delegated Snapshots: Send Raft snapshots between follower replicas +

Delegated Snapshots: Send Raft snapshots between follower replicas

-

Delegated Snapshots make multi-region deployments more cost-efficient by decreasing the use of remote snapshots if there is a local snapshot with the data. +

Delegated Snapshots make multi-region deployments more cost-efficient by decreasing the use of remote snapshots if there is a local snapshot with the data.

Sending data locally reduces your network costs and frees up the WAN bandwidth for the important data that must be transferred. @@ -419,18 +419,18 @@ Delegated Snapshots fixes this problem by sending snapshots from a local replica - Faster leaseholder recovery +

@@ -419,18 +419,18 @@ Delegated Snapshots fixes this problem by sending snapshots from a local replica - Faster leaseholder recovery +

Faster leaseholder recovery

-

The default CockroachDB lease duration has been reduced from 9 seconds to 6 seconds, to reduce range unavailability following leaseholder loss. Some other related settings have also had their defaults reduced, including heartbeat intervals, Raft election timeouts, and network timeouts. +

The default CockroachDB lease duration has been reduced from 9 seconds to 6 seconds, to reduce range unavailability following leaseholder loss. Some other related settings have also had their defaults reduced, including heartbeat intervals, Raft election timeouts, and network timeouts. - Lower default TTL for garbage collection (GC) +

Lower default TTL for garbage collection (GC)

-

The default GC TTL value in 23.1.0 is being reduced from 25 hrs to 4 hrs for new clusters. +

The default GC TTL value in 23.1.0 is being reduced from 25 hrs to 4 hrs for new clusters.

This change is being made to improve read performance, storage utilization, and cluster stability in high write traffic scenarios. -Scheduled backups will not be affected by this change as protected timestamps will ensure data isn't garbage-collected until it has been backed up. Changefeeds will also not be affected. If you want a 25-hour or larger GC TTL value (for example, to support AS OF SYSTEM TIME queries that go further back in time), you can explicitly set GC TTL to the desired value. +Scheduled backups will not be affected by this change as protected timestamps will ensure data isn't garbage-collected until it has been backed up. Changefeeds will also not be affected. If you want a 25-hour or larger GC TTL value (for example, to support AS OF SYSTEM TIME queries that go further back in time), you can explicitly set GC TTL to the desired value.

This change will only apply to new clusters. Existing clusters will retain the 25-hour default when upgrading, unless you have previously overridden it with an explicit value, in which case, that value will be retained. Backups taken from clusters running versions prior to v23.1 will similarly retain the GC TTL configured when the backup was taken. @@ -461,10 +461,12 @@ Before [upgrading to CockroachDB v23.1](../v23.1/upgrade-cockroach-version.html) - This syntax selects the tenant named `system` and then the database named `cluster:foo` inside it. When the `-ccluster:system` option is not specified, `cluster:foo` in the database name position is interpreted as a request to connect to a tenant named `foo`, which likely does not exist. Connections to databases whose name does not start with `cluster:` (the most common case) are not affected by this change. [#92580][#92580] - Changefeeds using "preview" expressions (released in v23.1.0) and that access the previous state of the row using the `cdc_prev()` function will no longer work and will need to be recreated with new syntax. [#94429][#94429] +- Some of the transformations specific to changefeeds have been deprecated and replaced. These functions were released in limited access in v22.2. Deprecated changefeed transformations continue to function. Closely monitor changefeeds that are created during upgrade. While effort was made to maintain backward compatibility, the updated changefeed transformation may produce slightly different output, such as different column names. [#96295][#96295] - Fixed a bug where, when `server.identity_map.configuration` was used, CockroachDB did not verify the client-provided username against the target mappings. Note that **this means that the client must now provide a valid DB username.** This requirement is compatible with PostgreSQL; it was not previously required by CockroachDB but it is now. This does not apply when identity maps are not in use. 
[#94915][#94915] - Previously, the type of the `replicas`, `voting_replicas`,`non_voting_replicas` and `learner_replicas` in `crdb_internal.ranges` were overridden to `INT2VECTOR` causing incompatible indexing between `.ranges` and `.ranges_no_leases`. Now the types of those columns in the two tables are set to `INT[]`. [#96287][#96287] - The output of the [`SHOW RANGES`](../v23.1/show-ranges.html) command for the `crdb_internal.ranges` and `crdb_internal.ranges_no_leases` tables has been updated, and the previous output is deprecated. To enable the new command output, set the `sql.show_ranges_deprecated_behavior.enabled` [cluster setting](../v23.1/cluster-settings.html) to `false`. The new output will become default in v23.2. [#99618][#99618] - Previously, if a user specified a [`search_path`](../v23.1/sql-name-resolution.html#current-schema) in the connection string parameters, it would always be treated as case sensitive. Now, in order to have the schema names in the `search_path` respect case, the user must include double quotes around the name. [#101492][#101492] +- The deprecated CLI command `debug unsafe-remove-dead-replicas` has been removed. Use `debug recover` instead. [#89150][#89150]

Key Cluster Setting Changes

@@ -472,14 +474,11 @@ The following changes should be reviewed prior to upgrading. Default cluster set | Category | Description | Change Type | Backport version | |---|---|---|---| -| SQL language change | The `backup.restore_span.target_size` cluster setting now defaults to `384 MiB `. This should reduce the number of ranges created during [restore](../v23.1/restore.html) and thereby reduce the merging of ranges that needs to occur after the restore. [#89333](https://github.com/cockroachdb/cockroach/pull/89333) | Changed default | v22.2.1 | | SQL language change | The [cluster setting](../v23.1/cluster-settings.html) `sql.ttl.default_range_concurrency` and table storage parameter `ttl_range_concurrency` are no longer configurable. [#89392](https://github.com/cockroachdb/cockroach/pull/89392) | No longer configurable | v22.2.1 | | SQL language change | The `sql.distsql.max_running_flows` [cluster setting](../v23.1/cluster-settings.html) has been removed. [#84888](https://github.com/cockroachdb/cockroach/pull/84888) | Removed | None | | Operational change | The [cluster settings](../v23.1/cluster-settings.html) `server.web_session.purge.period` and `server.web_session.purge.max_deletions_per_cycle`, which were specific to the cleanup function for `system.web_sessions`, have been replaced by `server.log_gc.period` and `server.log_gc.max_deletions_per_cycle` which apply to the cleanup function for `system.eventlog`, `system.rangelog` and `system.web_sessions` equally. [#90789](https://github.com/cockroachdb/cockroach/pull/90789) | Removed, repurposed | None | | Operational change | The [cluster setting](../v23.1/cluster-settings.html) `server.web_session.auto_logout.timeout` has been removed. 
[#90789](https://github.com/cockroachdb/cockroach/pull/90789) | Removed, defaults to true | None | -| {{ site.data.products.enterprise }} edition change | The `changefeed.active_protected_timestamps.enabled` [[cluster setting](../v23.1/cluster-settings.html)](../v23.1/cluster-settings.html) has been removed and is now always treated as if it was `true`. [#89975](https://github.com/cockroachdb/cockroach/pull/89975) | Changed default | None | -| {{ site.data.products.enterprise }} edition change | Increased the default `changefeed.memory.per_changefeed_limit` [cluster setting](../v23.1/cluster-settings.html) from `128MiB` to `512MiB`. This should result in changefeeds being able to produce larger files. [#96340](https://github.com/cockroachdb/cockroach/pull/96340) | Changed default | None | -| Operational change | The [load-based splitter](https://www.cockroachlabs.com/docs/v23.1/load-based-splitting) now supports using request CPU usage to split ranges. This is introduced with the previous cluster setting `kv.allocator.load_based_rebalancing.objective`, which when set to `cpu`, will use request CPU usage. The threshold above which CPU usage of a range is considered for splitting is defined in the cluster setting `kv.range_split.load_cpu_threshold`, which has a default value of `250ms`. (Relates to #100211 in this table.) [#96128](https://github.com/cockroachdb/cockroach/pull/96128) | Repurposed | None | +| Operational change | The [load-based splitter](../v23.1/load-based-splitting) now supports using request CPU usage to split ranges. This is introduced with the previous cluster setting `kv.allocator.load_based_rebalancing.objective`, which when set to `cpu`, will use request CPU usage. The threshold above which CPU usage of a range is considered for splitting is defined in the cluster setting `kv.range_split.load_cpu_threshold`, which has a default value of `250ms`. (Relates to #100211 in this table.) 
[#96128](https://github.com/cockroachdb/cockroach/pull/96128) | Repurposed | None | | Operational change | The `kv.range_split.load_cpu_threshold` [cluster setting](../v23.1/cluster-settings.html#setting-kv-range-split-load-cpu-threshold) now has a minimum setting value of `10ms`. Previously there was no minimum so, while unlikely, this could have an impact if you had chosen a custom setting lower than the established minimum. [#98250](https://github.com/cockroachdb/cockroach/pull/98250) | New minimum | None | | Security update | The new [cluster setting](/docs/v23.1/cluster-settings.html) `server.user_login.downgrade_scram_stored_passwords_to_bcrypt.enabled`, which allows you to migrate passwords from SCRAM to bcrypt during user authentication, defaults to `true`. If it is `true` and if `server.user_login.password_encryption` is `crdb-bcrypt`, then during login, the stored hashed password will be migrated from SCRAM to bcrypt. [#97429](https://github.com/cockroachdb/cockroach/pull/97429) | New setting | v22.2.6 | | Security update | The default value for the `server.user_login.password_hashes.default_cost.scram_sha_256` [cluster setting](../v23.1/cluster-settings.html) is now 10610. (Previously the default was 119680.) The old value was found to have been too high for many types of client hardware, and in some cases could cause regressions in connection latency. The new value was chosen by running tests with clients that have 1 or 2 vCPUs provisioned. Additionally, the new cluster setting `server.user_login.rehash_scram_stored_passwords_on_cost_change.enabled` was added, and defaults to `true`. If it is `true` and the stored SCRAM password for a user has a different cost than the configured default cost, then the next time the user logs in, their password will automatically be rehashed using the configured default cost. 
If the rehashing is not desired, then operators should update the `server.user_login.password_hashes.default_cost.scram_sha_256` cluster setting to the value they desire before upgrading. This change is being backported to [v22.2](../releases/v22.2.html). [#98254](https://github.com/cockroachdb/cockroach/pull/98254) | Changed default | v22.2.7 | @@ -490,7 +489,13 @@ The following changes should be reviewed prior to upgrading. Default cluster set

Deprecations

-tk +- Ordinal column references (e.g., `SELECT @1, @2 FROM t`) are now deprecated. By default, statements using this syntax will now result in an error. If desired, such statements can be allowed using the session setting `SET allow_ordinal_column_references=true`. Support for ordinal column references is scheduled to be removed in upcoming version v23.2. [#93754][#93754] +- The `CONTROLCHANGEFEED` [role option](../v23.1/alter-role.html#role-options) will be deprecated in the future (see issue [#94757](https://github.com/cockroachdb/cockroach/issues/94757)). With this change, usages of the `CONTROLCHANGEFEED` role option will come with a deprecation warning. Its existing behavior remains the same. The `SELECT` and `CHANGEFEED` privileges will be used for changefeeds henceforth: + - The `SELECT` privilege on a set of tables allows a user to run core changefeeds against them. + - The `CHANGEFEED` privilege on a set of tables allows a user to run enterprise changefeeds on them, and also manage the underlying changefeed job (ie. view, pause, cancel, and resume the job). + Notably, a new [cluster setting](../v23.1/cluster-settings.html) `changefeed.permissions.enforce_external_connections` is added and set to `false` by default. Enabling this setting restricts users with `CHANGEFEED` on a set of tables to create enterprise changefeeds into external connections only. To use a given external connection, a user typically needs the `USAGE` privilege on it. Note that `ALTER DEFAULT PRIVILEGES` can be used with both the `CHANGEFEED` and `SELECT` privileges to assign coarse-grained permissions (i.e., assign permissions to all tables in a schema rather than manually assign them for each table). [#94796][#94796] +- Deprecated the `PGDUMP` and `MYSQLDUMP` formats for [`IMPORT`](../v23.1/import.html). They are still present, but will be removed in a future release. See the [Migration Overview](../v23.1/migration-overview.html) page for alternatives. 
[#96386][#96386] +- Ordinal column references (e.g., `SELECT @1, @2 FROM t`) are now deprecated. By default, statements using this syntax will now result in an error. If desired, such statements can be allowed using the session setting `SET allow_ordinal_column_references=true`. Support for ordinal column references is scheduled to be removed in upcoming version v23.2. [#93754][#93754]

Known limitations

From 4daefdbf70c613eba941b5e1ebcff0c325410718 Mon Sep 17 00:00:00 2001 From: Rich Loveland Date: Mon, 15 May 2023 10:50:43 -0400 Subject: [PATCH 11/18] Clarify status of new SHOW RANGES output --- _includes/releases/v23.1/v23.1.0.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/_includes/releases/v23.1/v23.1.0.md b/_includes/releases/v23.1/v23.1.0.md index 523ef29862a..510f1c1d08d 100644 --- a/_includes/releases/v23.1/v23.1.0.md +++ b/_includes/releases/v23.1/v23.1.0.md @@ -444,7 +444,7 @@ Before [upgrading to CockroachDB v23.1](../v23.1/upgrade-cockroach-version.html) - Replaced the `cdc_prev()` [function](../v23.1/functions-and-operators.html) in favor of a `cdc_prev` tuple. This is an incompatible change that may break [changefeeds](../v23.1/change-data-capture-overview.html) that use the previous `cdc_prev()` function. [#85177][#85177] - [`SHOW RANGES FOR TABLE`](../v23.1/show-ranges.html) now includes rows for all indexes that support the table. Prior to this change, `SHOW RANGES FOR TABLE foo` was an alias for `SHOW RANGES FOR INDEX foo@primary`. This was causing confusion, as it would miss data for secondary indexes. It is still possible to filter to just the primary index using `SHOW RANGES FOR INDEX foo@primary`. The statement output now also includes the index name. [#93545][#93545] -- CockroachDB now supports sharing storage ranges across multiple indexes/tables. As a result, there is no longer a guarantee that there is at most one SQL object (e.g., table/index/sequence/materialized view) per storage range. Therefore, the columns `table_id`, `database_name`, `schema_name`, `table_name` and `index_name` in `crdb_internal.ranges` and `.ranges_no_leases` have become nonsensical: a range cannot be attributed to a single table/index anymore. As a result: +- CockroachDB now supports sharing storage ranges across multiple indexes/tables. 
As a result, there is no longer a guarantee that there is at most one SQL object (e.g., table/index/sequence/materialized view) per storage range. Therefore, the columns `table_id`, `database_name`, `schema_name`, `table_name` and `index_name` in `crdb_internal.ranges` and `.ranges_no_leases` have become nonsensical: a range cannot be attributed to a single table/index anymore. For the v23.1 release, the default behavior of [`SHOW RANGES`](../v23.1/show-ranges.html) is retained, but users should consider setting the [cluster setting `sql.show_ranges_deprecated_behavior.enabled` to `false`](../v23.1/cluster-settings.html#setting-sql-show-ranges-deprecated-behavior-enabled). This will have the following effects that will become the defaults in a future release: - The aforementioned columns in the `crdb_internal` virtual tables have been removed. Existing code can use the [`SHOW RANGES`](../v23.1/show-ranges.html) statement instead, optionally using `WITH KEYS` to expose the raw start/end keys. - `SHOW RANGES FROM DATABASE` continues to report one row per range, but stops returning the database / schema / table / index name. 
@@ -453,9 +453,9 @@ Before [upgrading to CockroachDB v23.1](../v23.1/upgrade-cockroach-version.html) - Instead of `SELECT range_id FROM crdb_internal.ranges WHERE table_name = $1 OR table_id = $2` (variable / unpredictable table name or ID), use: `SELECT range_id FROM [SHOW RANGES FROM CURRENT_CATALOG WITH TABLES] WHERE table_name = $1 OR table_id = $2` - Instead of `SELECT start_key FROM crdb_internal.ranges WHERE table_name = 'x'`, use: `SELECT raw_start_key FROM [SHOW RANGES FROM TABLE x WITH KEYS]` - Instead of `SELECT start_key FROM crdb_internal.ranges WHERE table_name = $1 OR table_id = $2` (unpredictable / variable table name or ID), use: `SELECT raw_start_key FROM [SHOW RANGES FROM CURRENT_CATALOG WITH TABLES, KEYS] WHERE table_name = $1 OR table_id = $2` [#93644][#93644] -- The format of the columns `start_key` and `end_key` for `SHOW RANGES FROM DATABASE` and `SHOW RANGES FROM TABLE` have been extended to include which table/index the key belongs to. This is necessary because a range can now contain data from more than one table/index. [#93644][#93644] +- When the [cluster setting `sql.show_ranges_deprecated_behavior.enabled` is set to `false` (recommended in v23.1)](../v23.1/cluster-settings.html#setting-sql-show-ranges-deprecated-behavior-enabled), the format of the columns `start_key` and `end_key` for [`SHOW RANGES FROM DATABASE`](../v23.1/show-ranges.html) and `SHOW RANGES FROM TABLE` have been extended to include which table/index the key belongs to. This is necessary because a range can now contain data from more than one table/index. [#93644][#93644] +- When the [cluster setting `sql.show_ranges_deprecated_behavior.enabled` is set to `false` (recommended in v23.1)](../v23.1/cluster-settings.html#setting-sql-show-ranges-deprecated-behavior-enabled), the output of [`SHOW RANGES`](../v23.1/show-ranges.html) no longer includes `range_size`, `range_size_mb`, `lease_holder`, or `lease_holder_localities` by default. 
This ensures that `SHOW RANGES` remains fast in the common case. Use the new option [`WITH DETAILS`](../v23.1/show-ranges.html#options) to include these columns. [#93644][#93644] - The format of the columns `start_key` and `end_key` for `SHOW RANGE ... FOR ROW` has been changed to be consistent with the output of `SHOW RANGES FROM INDEX`. [#93644][#93644] -- The output of [`SHOW RANGES`](../v23.1/show-ranges.html) no longer includes `range_size`, `range_size_mb`, `lease_holder`, or `lease_holder_localities` by default. This ensures that `SHOW RANGES` remains fast in the common case. Use the new option `WITH DETAILS` to include these columns. [#93644][#93644] - If a SQL database is created with a name that starts with `cluster:...` (e.g., `CREATE DATABASE "cluster:foo"`, clients will no longer be able to connect to it directly via a pre-existing URL connection string. The URL will need to be modified in this case. For example: - Previously: `postgres://servername/cluster:foo`; now: `postgres://servername/cluster:foo&options=-ccluster=system` - This syntax selects the tenant named `system` and then the database named `cluster:foo` inside it. When the `-ccluster:system` option is not specified, `cluster:foo` in the database name position is interpreted as a request to connect to a tenant named `foo`, which likely does not exist. Connections to databases whose name does not start with `cluster:` (the most common case) are not affected by this change. 
[#92580][#92580] From b18bbf18564d1cfb3fdfade93adca6d32f536470 Mon Sep 17 00:00:00 2001 From: shannonbradshaw Date: Mon, 15 May 2023 11:18:21 -0400 Subject: [PATCH 12/18] Docs updates to additional resources section --- _includes/releases/v23.1/v23.1.0.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/_includes/releases/v23.1/v23.1.0.md b/_includes/releases/v23.1/v23.1.0.md index 510f1c1d08d..31e79ac09c7 100644 --- a/_includes/releases/v23.1/v23.1.0.md +++ b/_includes/releases/v23.1/v23.1.0.md @@ -510,9 +510,9 @@ Cockroach University | [Practical First Steps with CockroachDB](https://universi Cockroach University | [Building a Highly Resilient Multi-region Database using CockroachDB](https://university.cockroachlabs.com/courses/course-v1:crl+intro-to-resilience-in-multi-region+self-paced/about) | This course is part of a series introducing solutions to running low-latency, highly resilient applications for data-intensive workloads on CockroachDB. In this course we focus on surviving large-scale infrastructure failures like losing an entire cloud region without losing data during recovery. We’ll show you how to use CockroachDB survival goals in a multi-region cluster to implement a highly resilient database that survives node or network failures across multiple regions with zero data loss. Cockroach University | [Introduction to Serverless Databases and CockroachDB Serverless](https://university.cockroachlabs.com/courses/course-v1:crl+intro-to-serverless+self-paced/about) | This course introduces the core concepts behind serverless databases and gives you the tools you need to get started with CockroachDB Serverless. You will learn how serverless databases remove the burden of configuring, sizing, provisioning, securing, maintaining and dynamically scaling your database based on load. This means you simply pay for the serverless database resources you use. 
Docs | [Migration Overview](../v23.1/migration-overview.html) | This page summarizes the steps of migrating a database to CockroachDB, which include testing and updating your schema to work with CockroachDB, moving your data into CockroachDB, and testing and updating your application. -Docs | [Unsupported Features in CockroachDB Serverless](../cockroachcloud/serverless-unsupported-features.html) | This page describes the features that are either unsupported or partially supported in CockroachDB serverless clusters -Docs | [Sample apps with ccloud](../v23.1/build-a-nodejs-app-with-cockroachdb.html?filters=ccloud) | Sample application docs now includes steps to create a {{ site.data.products.serverless }} cluster using the `ccloud` CLI tool. -Docs | [API Support Policy](../v23.1/api-support-policy.html) | This page includes the following information: our API support policies, our definitions of backward-incompatible and backward-compatible changes, and a summary of APIs that CockroachDB makes available. -Docs | [CockroachDB Kubernetes Operator release notes](../releases/kubernetes-operator.html) | The CockroachDB Kubernetes Operator-specific release notes are now surfaced on this page. -Docs | [HashiCorp Vault tutorial](../v23.1/vault-db-secrets-tutorial.html) | This pages reviews the supported integrations between CockroachDB and HashiCorp's Vault, which offers tooling to extend CockroachDB's data security capabilities. -Docs | [Backup architecture](../v23.1/backup-architecture.html) | This page describes the backup job workflow with a high-level overview, diagrams, and more details on each phase of the job. +Docs | [Developer Guide Overview](../v23.1/developer-guide-overview.html) | This page provides an overview of resources available to developers building applications on CockroachDB. +Docs | [Security Overview](../v23.1/security-reference/security-overview.html) | The 23.1 release encapsulates a number of security milestones. 
See the security overview for a summary. +Docs | [Architecture Overview](../v23.1/architecture/overview.html) | This page provides a starting point for understanding the architecture and design choices that enable CockroachDB's scalability and consistency capabilities. +Docs | [SQL Feature Support](../v23.1/sql-feature-support.html) | The page summarizes the standard SQL features CockroachDB supports as well as common extensions to the standard. +Docs | [Change Data Capture Overview](../v23.1/change-data-capture-overview.html) | This page summarizes CockroachDB's data streaming capabilities. Change data capture (CDC) provides efficient, distributed, row-level changefeeds into a configurable sink for downstream processing such as reporting, caching, or full-text indexing. +Docs | [Backup Architecture](../v23.1/backup-architecture.html) | This page describes the backup job workflow with a high-level overview, diagrams, and more details on each phase of the job. From 2168f9cbe34153dfb894e72be0a420feaa9f2fdd Mon Sep 17 00:00:00 2001 From: shannonbradshaw Date: Mon, 15 May 2023 11:53:35 -0400 Subject: [PATCH 13/18] Fix link errors missing .html --- _includes/releases/v23.1/v23.1.0.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/_includes/releases/v23.1/v23.1.0.md b/_includes/releases/v23.1/v23.1.0.md index 31e79ac09c7..27faa5c2ba0 100644 --- a/_includes/releases/v23.1/v23.1.0.md +++ b/_includes/releases/v23.1/v23.1.0.md @@ -4,7 +4,7 @@ Release Date: May 15, 2023 With the release of CockroachDB v23.1, we've added new capabilities in CockroachDB to help you migrate, build, and operate more efficiently. Check out a [summary of the most significant user-facing changes](#v23-1-0-feature-highlights) and then [upgrade to CockroachDB v23.1](../v23.1/upgrade-cockroach-version.html). -To learn more about the launch, see the [v23.1 launch page](cockroachlabs.com/whatsnew). 
+To learn more about the launch, see the [v23.1 launch page](https://www.cockroachlabs.com/whatsnew). {% include releases/release-downloads-docker-image.md release=include.release %} @@ -55,7 +55,7 @@ The features highlighted below are freely available in {{ site.data.products.cor

Full-text search using TSVector and TSQuery

A full-text search is used to perform queries on natural-language documents such as articles, websites, or other written formats, with results often sorted by relevance.

-

You can rely on new built-in functions to make use of the new TSVECTOR and TSQUERY data types.

+

You can rely on new built-in functions to make use of the new TSVECTOR and TSQUERY data types.

@@ -244,7 +244,7 @@ Asyncpg is commonly used with ORM libraries such as SQLAlchemy to provide a simp

Create scheduled exports using changefeeds

-

Changefeeds can offer benefits over existing export functionality for customers who need higher levels of scale and observability. You can now create Changefeeds as Export functionality. +

Changefeeds can offer benefits over existing export functionality for customers who need higher levels of scale and observability. You can now create Changefeeds as Export functionality. @@ -430,7 +430,7 @@ Delegated Snapshots fixes this problem by sending snapshots from a local replica

The default GC TTL value in 23.1.0 is being reduced from 25 hrs to 4 hrs for new clusters.

This change is being made to improve read performance, storage utilization, and cluster stability in high write traffic scenarios. -Scheduled backups will not be affected by this change as protected timestamps will ensure data isn't garbage-collected until it has been backed up. Changefeeds will also not be affected. If you want a 25-hour or larger GC TTL value (for example, to support AS OF SYSTEM TIME queries that go further back in time), you can explicitly set GC TTL to the desired value. +Scheduled backups will not be affected by this change as protected timestamps will ensure data isn't garbage-collected until it has been backed up. Changefeeds will also not be affected. If you want a 25-hour or larger GC TTL value (for example, to support AS OF SYSTEM TIME queries that go further back in time), you can explicitly set GC TTL to the desired value.

This change will only apply to new clusters. Existing clusters will retain the 25-hour default when upgrading, unless you have previously overridden it with an explicit value, in which case, that value will be retained. Backups taken from clusters running versions prior to v23.1 will similarly retain the GC TTL configured when the backup was taken. @@ -478,7 +478,7 @@ The following changes should be reviewed prior to upgrading. Default cluster set | SQL language change | The `sql.distsql.max_running_flows` [cluster setting](../v23.1/cluster-settings.html) has been removed. [#84888](https://github.com/cockroachdb/cockroach/pull/84888) | Removed | None | | Operational change | The [cluster settings](../v23.1/cluster-settings.html) `server.web_session.purge.period` and `server.web_session.purge.max_deletions_per_cycle`, which were specific to the cleanup function for `system.web_sessions`, have been replaced by `server.log_gc.period` and `server.log_gc.max_deletions_per_cycle` which apply to the cleanup function for `system.eventlog`, `system.rangelog` and `system.web_sessions` equally. [#90789](https://github.com/cockroachdb/cockroach/pull/90789) | Removed, repurposed | None | | Operational change | The [cluster setting](../v23.1/cluster-settings.html) `server.web_session.auto_logout.timeout` has been removed. [#90789](https://github.com/cockroachdb/cockroach/pull/90789) | Removed, defaults to true | None | -| Operational change | The [load-based splitter](../v23.1/load-based-splitting) now supports using request CPU usage to split ranges. This is introduced with the previous cluster setting `kv.allocator.load_based_rebalancing.objective`, which when set to `cpu`, will use request CPU usage. The threshold above which CPU usage of a range is considered for splitting is defined in the cluster setting `kv.range_split.load_cpu_threshold`, which has a default value of `250ms`. (Relates to #100211 in this table.) 
[#96128](https://github.com/cockroachdb/cockroach/pull/96128) | Repurposed | None | +| Operational change | The [load-based splitter](../v23.1/load-based-splitting.html) now supports using request CPU usage to split ranges. This is introduced with the previous cluster setting `kv.allocator.load_based_rebalancing.objective`, which when set to `cpu`, will use request CPU usage. The threshold above which CPU usage of a range is considered for splitting is defined in the cluster setting `kv.range_split.load_cpu_threshold`, which has a default value of `250ms`. (Relates to #100211 in this table.) [#96128](https://github.com/cockroachdb/cockroach/pull/96128) | Repurposed | None | | Operational change | The `kv.range_split.load_cpu_threshold` [cluster setting](../v23.1/cluster-settings.html#setting-kv-range-split-load-cpu-threshold) now has a minimum setting value of `10ms`. Previously there was no minimum so, while unlikely, this could have an impact if you had chosen a custom setting lower than the established minimum. [#98250](https://github.com/cockroachdb/cockroach/pull/98250) | New minimum | None | | Security update | The new [cluster setting](/docs/v23.1/cluster-settings.html) `server.user_login.downgrade_scram_stored_passwords_to_bcrypt.enabled`, which allows you to migrate passwords from SCRAM to bcrypt during user authentication, defaults to `true`. If it is `true` and if `server.user_login.password_encryption` is `crdb-bcrypt`, then during login, the stored hashed password will be migrated from SCRAM to bcrypt. [#97429](https://github.com/cockroachdb/cockroach/pull/97429) | New setting | v22.2.6 | | Security update | The default value for the `server.user_login.password_hashes.default_cost.scram_sha_256` [cluster setting](../v23.1/cluster-settings.html) is now 10610. (Previously the default was 119680.) The old value was found to have been too high for many types of client hardware, and in some cases could cause regressions in connection latency. 
The new value was chosen by running tests with clients that have 1 or 2 vCPUs provisioned. Additionally, the new cluster setting `server.user_login.rehash_scram_stored_passwords_on_cost_change.enabled` was added, and defaults to `true`. If it is `true` and the stored SCRAM password for a user has a different cost than the configured default cost, then the next time the user logs in, their password will automatically be rehashed using the configured default cost. If the rehashing is not desired, then operators should update the `server.user_login.password_hashes.default_cost.scram_sha_256` cluster setting to the value they desire before upgrading. This change is being backported to [v22.2](../releases/v22.2.html). [#98254](https://github.com/cockroachdb/cockroach/pull/98254) | Changed default | v22.2.7 | From 8bc81a6c206b96b2da6700ebb278c9e208e08081 Mon Sep 17 00:00:00 2001 From: Lauren Hirata Singh Date: Mon, 15 May 2023 13:01:51 -0400 Subject: [PATCH 14/18] Update _includes/releases/v23.1/v23.1.0.md Co-authored-by: Kathryn Hancox <44557882+kathancox@users.noreply.github.com> --- _includes/releases/v23.1/v23.1.0.md | 6 ------ 1 file changed, 6 deletions(-) diff --git a/_includes/releases/v23.1/v23.1.0.md b/_includes/releases/v23.1/v23.1.0.md index 27faa5c2ba0..34cdc8f0fb9 100644 --- a/_includes/releases/v23.1/v23.1.0.md +++ b/_includes/releases/v23.1/v23.1.0.md @@ -253,12 +253,6 @@ Asyncpg is commonly used with ORM libraries such as SQLAlchemy to provide a simp

The use of a webhook sink to deliver changefeed messages to an arbitrary HTTPS endpoint has been promoted from Preview to GA. - -

Add parquet format to changefeeds

- -

The Parquet format offers efficient compression and processing of large data exports, further extending the capabilities of Changefeeds as Exports. - -

CDC Queries

From b23592cb20dcaa774f2739f8df0ed17252fa76ab Mon Sep 17 00:00:00 2001 From: Lauren Hirata Singh Date: Mon, 15 May 2023 13:01:57 -0400 Subject: [PATCH 15/18] Update _includes/releases/v23.1/v23.1.0.md Co-authored-by: Kathryn Hancox <44557882+kathancox@users.noreply.github.com> --- _includes/releases/v23.1/v23.1.0.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/_includes/releases/v23.1/v23.1.0.md b/_includes/releases/v23.1/v23.1.0.md index 34cdc8f0fb9..6c4487a2690 100644 --- a/_includes/releases/v23.1/v23.1.0.md +++ b/_includes/releases/v23.1/v23.1.0.md @@ -256,7 +256,7 @@ Asyncpg is commonly used with ORM libraries such as SQLAlchemy to provide a simp

CDC Queries

-

CDC Queries (Formerly CDC Transformations) have been promoted from Preview to GA. +

CDC Queries (formerly CDC Transformations) have been promoted from Preview to GA. From 030369ea3d8a25f96bd2220070cfd0cbb50b9a90 Mon Sep 17 00:00:00 2001 From: Lauren Hirata Singh Date: Mon, 15 May 2023 13:02:06 -0400 Subject: [PATCH 16/18] Update _includes/releases/v23.1/v23.1.0.md Co-authored-by: Kathryn Hancox <44557882+kathancox@users.noreply.github.com> --- _includes/releases/v23.1/v23.1.0.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/_includes/releases/v23.1/v23.1.0.md b/_includes/releases/v23.1/v23.1.0.md index 6c4487a2690..c5d8f58899e 100644 --- a/_includes/releases/v23.1/v23.1.0.md +++ b/_includes/releases/v23.1/v23.1.0.md @@ -307,7 +307,7 @@ With the move from Preview to GA, this feature brings many new capabilities, suc

Restrict backups to a locality

-

You can now restrict backup execution to a specific region/locality. Only the nodes in the specified locality filter will execute the backup, so only those nodes will need access to the backup storage bucket. +

You can now [restrict backup execution](take-locality-restricted-backups.html) to a specific region/locality. Only the nodes in the specified locality filter will execute the backup, so only those nodes will need access to the backup storage bucket.

If the node executing the backup does not have a specific range to be backed up, it will read it from the closest replica it can. From 0c253755b8c3be1eeab8a69a5a2b6eee5596113e Mon Sep 17 00:00:00 2001 From: Lauren Hirata Singh Date: Mon, 15 May 2023 13:25:19 -0400 Subject: [PATCH 17/18] Update versions.csv --- _data/versions.csv | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/_data/versions.csv b/_data/versions.csv index 702fcd59e76..69fcaa6d8b0 100644 --- a/_data/versions.csv +++ b/_data/versions.csv @@ -11,4 +11,4 @@ v21.1,2021-05-18,2022-05-18,2022-11-18,v20.2 v21.2,2021-11-16,2022-11-16,2023-05-16,v21.1 v22.1,2022-05-24,2023-05-24,2023-11-24,v21.2 v22.2,2022-12-05,2023-12-05,2024-06-05,v22.1 -v23.1,2023-05-10,2024-05-15,2024-11-15,v22.2 +v23.1,2023-05-15,2024-05-15,2024-11-15,v22.2 From a992949014fd97a39d264186f337946d488fb081 Mon Sep 17 00:00:00 2001 From: Lauren Hirata Singh Date: Mon, 15 May 2023 15:27:39 -0400 Subject: [PATCH 18/18] Update v23.1.0.md Remove broken link until after GA release --- _includes/releases/v23.1/v23.1.0.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/_includes/releases/v23.1/v23.1.0.md b/_includes/releases/v23.1/v23.1.0.md index c5d8f58899e..84b3f5aba3e 100644 --- a/_includes/releases/v23.1/v23.1.0.md +++ b/_includes/releases/v23.1/v23.1.0.md @@ -4,8 +4,6 @@ Release Date: May 15, 2023 With the release of CockroachDB v23.1, we've added new capabilities in CockroachDB to help you migrate, build, and operate more efficiently. Check out a [summary of the most significant user-facing changes](#v23-1-0-feature-highlights) and then [upgrade to CockroachDB v23.1](../v23.1/upgrade-cockroach-version.html). -To learn more about the launch, see the [v23.1 launch page](cockroachlabs.com/whatsnew.html). - {% include releases/release-downloads-docker-image.md release=include.release %}

{{ site.data.products.db }}