From 7f59bd8b8ae11e6fb9db36f6410f52a93632e6b0 Mon Sep 17 00:00:00 2001 From: Dmitry Uspenskiy <47734295+d-uspenskiy@users.noreply.github.com> Date: Fri, 13 Sep 2024 10:32:13 +0300 Subject: [PATCH 01/75] [#22519] YSQL: Substitute YBSetRowLockPolicy function with YBGetDocDBWaitPolicy Summary: Auxiliary diff to fix `TODO` section to simplify the `YBSetRowLockPolicy` function usage by substituting it with the `YBGetDocDBWaitPolicy` function. The purpose of this diff is to reduce changes in main diff for the #22519 issue (https://phorge.dev.yugabyte.com/D37821) Jira: DB-11445 Test Plan: Jenkins Reviewers: kramanathan, tfoucher Reviewed By: tfoucher Subscribers: yql Tags: #jenkins-ready Differential Revision: https://phorge.dev.yugabyte.com/D38003 --- .../src/backend/access/yb_access/yb_scan.c | 15 +++------------ .../src/backend/executor/nodeIndexscan.c | 4 ++-- .../src/backend/executor/nodeYbSeqscan.c | 5 ++--- src/postgres/src/backend/executor/ybc_fdw.c | 4 ++-- .../src/backend/utils/misc/pg_yb_utils.c | 17 ++++++++--------- src/postgres/src/include/pg_yb_utils.h | 8 +++++--- 6 files changed, 22 insertions(+), 31 deletions(-) diff --git a/src/postgres/src/backend/access/yb_access/yb_scan.c b/src/postgres/src/backend/access/yb_access/yb_scan.c index e26557ee2b82..a8c18eda5b6a 100644 --- a/src/postgres/src/backend/access/yb_access/yb_scan.c +++ b/src/postgres/src/backend/access/yb_access/yb_scan.c @@ -3709,15 +3709,6 @@ HeapTuple YBCFetchTuple(Relation relation, Datum ybctid) return tuple; } -// TODO: Substitute the YBSetRowLockPolicy with this function -static int -YBCGetRowLockPolicy(LockWaitPolicy pg_wait_policy) -{ - int docdb_wait_policy; - YBSetRowLockPolicy(&docdb_wait_policy, pg_wait_policy); - return docdb_wait_policy; -} - /* * The return value of this function depends on whether we are batching or not. 
* Currently, batching is enabled if the GUC yb_explicit_row_locking_batch_size > 1 @@ -3731,7 +3722,7 @@ YBCLockTuple( Relation relation, Datum ybctid, RowMarkType mode, LockWaitPolicy pg_wait_policy, EState* estate) { - int docdb_wait_policy = YBCGetRowLockPolicy(pg_wait_policy); + const int docdb_wait_policy = YBGetDocDBWaitPolicy(pg_wait_policy); const YBCPgExplicitRowLockParams lock_params = { .rowmark = mode, .pg_wait_policy = pg_wait_policy, @@ -3740,8 +3731,8 @@ YBCLockTuple( const Oid relfile_oid = YbGetRelfileNodeId(relation); const Oid db_oid = YBCGetDatabaseOid(relation); - if (yb_explicit_row_locking_batch_size > 1 - && lock_params.pg_wait_policy != LockWaitSkip) + if (yb_explicit_row_locking_batch_size > 1 && + lock_params.pg_wait_policy != LockWaitSkip) { // TODO: Error message requires conversion HandleYBStatus(YBCAddExplicitRowLockIntent( diff --git a/src/postgres/src/backend/executor/nodeIndexscan.c b/src/postgres/src/backend/executor/nodeIndexscan.c index 47910310dcb8..1186135e5b44 100644 --- a/src/postgres/src/backend/executor/nodeIndexscan.c +++ b/src/postgres/src/backend/executor/nodeIndexscan.c @@ -178,8 +178,8 @@ IndexNext(IndexScanState *node) { scandesc->yb_exec_params->rowmark = erm->markType; scandesc->yb_exec_params->pg_wait_policy = erm->waitPolicy; - YBSetRowLockPolicy(&scandesc->yb_exec_params->docdb_wait_policy, - erm->waitPolicy); + scandesc->yb_exec_params->docdb_wait_policy = + YBGetDocDBWaitPolicy(erm->waitPolicy); } break; } diff --git a/src/postgres/src/backend/executor/nodeYbSeqscan.c b/src/postgres/src/backend/executor/nodeYbSeqscan.c index 80a7bec91f6f..1cf7ae39b898 100644 --- a/src/postgres/src/backend/executor/nodeYbSeqscan.c +++ b/src/postgres/src/backend/executor/nodeYbSeqscan.c @@ -143,9 +143,8 @@ YbSeqNext(YbSeqScanState *node) { scandesc->ybscan->exec_params->rowmark = erm->markType; scandesc->ybscan->exec_params->pg_wait_policy = erm->waitPolicy; - YBSetRowLockPolicy( - 
&scandesc->ybscan->exec_params->docdb_wait_policy, - erm->waitPolicy); + scandesc->ybscan->exec_params->docdb_wait_policy = + YBGetDocDBWaitPolicy(erm->waitPolicy); } break; } diff --git a/src/postgres/src/backend/executor/ybc_fdw.c b/src/postgres/src/backend/executor/ybc_fdw.c index 78276a7dec57..6b1deb026eef 100644 --- a/src/postgres/src/backend/executor/ybc_fdw.c +++ b/src/postgres/src/backend/executor/ybc_fdw.c @@ -361,8 +361,8 @@ ybcBeginForeignScan(ForeignScanState *node, int eflags) { ybc_state->exec_params->rowmark = erm->markType; ybc_state->exec_params->pg_wait_policy = erm->waitPolicy; - YBSetRowLockPolicy(&ybc_state->exec_params->docdb_wait_policy, - erm->waitPolicy); + ybc_state->exec_params->docdb_wait_policy = + YBGetDocDBWaitPolicy(erm->waitPolicy); } break; } diff --git a/src/postgres/src/backend/utils/misc/pg_yb_utils.c b/src/postgres/src/backend/utils/misc/pg_yb_utils.c index 71e156e45f8e..da8c9f8ff978 100644 --- a/src/postgres/src/backend/utils/misc/pg_yb_utils.c +++ b/src/postgres/src/backend/utils/misc/pg_yb_utils.c @@ -4423,8 +4423,10 @@ uint64_t YbGetSharedCatalogVersion() return version; } -void YBSetRowLockPolicy(int *docdb_wait_policy, LockWaitPolicy pg_wait_policy) +LockWaitPolicy YBGetDocDBWaitPolicy(LockWaitPolicy pg_wait_policy) { + LockWaitPolicy result = pg_wait_policy; + if (XactIsoLevel == XACT_REPEATABLE_READ && pg_wait_policy == LockWaitError) { /* The user requested NOWAIT, which isn't allowed in RR. */ @@ -4443,14 +4445,10 @@ void YBSetRowLockPolicy(int *docdb_wait_policy, LockWaitPolicy pg_wait_policy) "(GH issue #11761)", pg_wait_policy == LockWaitSkip ? 
"SKIP LOCKED" : "NO WAIT"); - *docdb_wait_policy = LockWaitBlock; - } - else - { - *docdb_wait_policy = pg_wait_policy; + result = LockWaitBlock; } - if (*docdb_wait_policy == LockWaitBlock && !YBIsWaitQueueEnabled()) + if (result == LockWaitBlock && !YBIsWaitQueueEnabled()) { /* * If wait-queues are not enabled, we default to the "Fail-on-Conflict" policy which is @@ -4459,9 +4457,10 @@ void YBSetRowLockPolicy(int *docdb_wait_policy, LockWaitPolicy pg_wait_policy) * semantics but to Fail-on-Conflict semantics). */ elog(DEBUG1, "Falling back to LockWaitError since wait-queues are not enabled"); - *docdb_wait_policy = LockWaitError; + result = LockWaitError; } - elog(DEBUG2, "docdb_wait_policy=%d pg_wait_policy=%d", *docdb_wait_policy, pg_wait_policy); + elog(DEBUG2, "docdb_wait_policy=%d pg_wait_policy=%d", result, pg_wait_policy); + return result; } uint32_t YbGetNumberOfDatabases() diff --git a/src/postgres/src/include/pg_yb_utils.h b/src/postgres/src/include/pg_yb_utils.h index 6b7b4c63bbf9..c48148e68527 100644 --- a/src/postgres/src/include/pg_yb_utils.h +++ b/src/postgres/src/include/pg_yb_utils.h @@ -984,19 +984,21 @@ bool YbCatalogVersionTableInPerdbMode(); * This function maps the user intended row-level lock policy i.e., "pg_wait_policy" of * type enum LockWaitPolicy to the "docdb_wait_policy" of type enum WaitPolicy as defined in * common.proto. + * Note: enum WaitPolicy values are equal to enum LockWaitPolicy. + * That is why function maps enum LockWaitPolicy into enum LockWaitPolicy. * * The semantics of the WaitPolicy enum differ slightly from those of the traditional LockWaitPolicy * in Postgres, as explained in common.proto. This is for historical reasons. WaitPolicy in * common.proto was created as a copy of LockWaitPolicy to be passed to the Tserver to help in * appropriate conflict-resolution steps for the different row-level lock policies. 
* - * In isolation level SERIALIZABLE, this function sets docdb_wait_policy to WAIT_BLOCK as + * In isolation level SERIALIZABLE, this function returns WAIT_BLOCK as * this is the only policy currently supported for SERIALIZABLE. * * However, if wait queues aren't enabled in the following cases: * * Isolation level SERIALIZABLE * * The user requested LockWaitBlock in another isolation level - * this function sets docdb_wait_policy to WAIT_ERROR (which actually uses the "Fail on Conflict" + * this function returns WAIT_ERROR (which actually uses the "Fail on Conflict" * conflict management policy instead of "no wait" semantics, as explained in "enum WaitPolicy" in * common.proto). * @@ -1006,7 +1008,7 @@ bool YbCatalogVersionTableInPerdbMode(); * 2. In isolation level REPEATABLE READ for a pg_wait_policy of LockWaitError because NOWAIT * is not supported. */ -void YBSetRowLockPolicy(int *docdb_wait_policy, LockWaitPolicy pg_wait_policy); +LockWaitPolicy YBGetDocDBWaitPolicy(LockWaitPolicy pg_wait_policy); const char *yb_fetch_current_transaction_priority(void); From 027ce5b5a5e357ca08e7acd21d11843c036c12ec Mon Sep 17 00:00:00 2001 From: Dwight Hodge <79169168+ddhodge@users.noreply.github.com> Date: Fri, 13 Sep 2024 10:42:22 -0400 Subject: [PATCH 02/75] [doc] Configure CLion database project (#23856) * CLion database project * format * review comments * edit * format * review comments * unversioned * icons * icon * icon * Update docs/content/preview/contribute/core-database/clion-setup.md Co-authored-by: Aishwarya Chakravarthy --------- Co-authored-by: Aishwarya Chakravarthy --- .../Yugabyte/spelling-exceptions.txt | 1 + docs/content/preview/contribute/_index.md | 4 +- .../contribute/core-database/_index.md | 89 +++++++------------ .../contribute/core-database/clion-setup.md | 60 +++++++------ .../content/preview/contribute/docs/_index.md | 89 +++++++------------ 5 files changed, 101 insertions(+), 142 deletions(-) diff --git 
a/.github/vale-styles/Yugabyte/spelling-exceptions.txt b/.github/vale-styles/Yugabyte/spelling-exceptions.txt index 4db775383484..593e5d940f07 100644 --- a/.github/vale-styles/Yugabyte/spelling-exceptions.txt +++ b/.github/vale-styles/Yugabyte/spelling-exceptions.txt @@ -113,6 +113,7 @@ checksumming CIDRs Citrix Citus +CLion clonable Cloudwatch CMake diff --git a/docs/content/preview/contribute/_index.md b/docs/content/preview/contribute/_index.md index f72fa10fd86a..96ae5a135a31 100644 --- a/docs/content/preview/contribute/_index.md +++ b/docs/content/preview/contribute/_index.md @@ -3,9 +3,11 @@ title: Contribute to YugabyteDB headerTitle: Contribute to YugabyteDB linkTitle: Contribute description: Contribute to the YugabyteDB code and documentation. -image: /images/section_icons/index/quick_start.png +image: fa-light fa-code-pull-request headcontent: Contribute code and docs to improve YugabyteDB. type: indexpage +cascade: + unversioned: true showRightNav: true --- diff --git a/docs/content/preview/contribute/core-database/_index.md b/docs/content/preview/contribute/core-database/_index.md index 799da2de6633..66b0042f915b 100644 --- a/docs/content/preview/contribute/core-database/_index.md +++ b/docs/content/preview/contribute/core-database/_index.md @@ -3,7 +3,7 @@ title: Contribute to the core database headerTitle: Contribute to the core database linkTitle: Core database description: Contribute to the core database -image: /images/section_icons/index/quick_start.png +image: fa-light fa-rectangle-terminal headcontent: How to contribute code to the core database menu: preview: @@ -12,64 +12,37 @@ menu: weight: 2910 type: indexpage --- - + {{}} + + {{}} + + {{}} + +{{}} diff --git a/docs/content/preview/contribute/core-database/clion-setup.md b/docs/content/preview/contribute/core-database/clion-setup.md index b95f12a33275..ccb33c7520f4 100644 --- a/docs/content/preview/contribute/core-database/clion-setup.md +++ 
b/docs/content/preview/contribute/core-database/clion-setup.md @@ -3,7 +3,7 @@ title: Configure a CLion project headerTitle: Configure a CLion project linkTitle: Configure a CLion project description: Configure a CLion project for building YugabyteDB using cmake or ninja. -headcontent: CLion project setup. +headcontent: Use the CLion IDE with YugabyteDB menu: preview: identifier: configure-clion @@ -12,42 +12,52 @@ menu: type: docs --- -There are two options for build systems that you can use with YugabyteDB, [`ninja`](https://ninja-build.org/) and [`make`](https://en.wikipedia.org/wiki/Make_(software)). -Note that the [CMake](https://cmake.org/) meta build system is used in both cases, and it generates build files consumed by the underlying Ninja and Make build systems. +Configure a project in the [CLion](https://www.jetbrains.com/clion/) C/C++ IDE. -* `ninja` is faster than `make`, especially for rebuilding mostly-built projects, but CLion has limited support for `ninja` (for example, it doesn't allow you to [rebuild individual files](https://youtrack.jetbrains.com/issue/CPP-17622)). -* `make` is well-supported by CLion, but slower, particularly for rebuilding mostly-built projects, compared to `ninja`. +## Configure a CLion compilation database project -### Configure a CLion project for YugabyteDB +For best performance, configure the project as a compilation database project: -#### Opening the directory +1. Run `./yb_build.sh compilecmds` to generate the `compile_commands.json` file in the `yugabyte-db` directory. -Click **File > Open…** to open the project root directory. +1. Verify that `compile_commands.json` is present in the `yugabyte-db` folder. From the `yugabyte-db` folder, run the following command: -#### Configuring CMake preferences + ```sh + $ find . 
-name "compile_commands.json" + ``` -##### Setting CMake preferences when using Ninja + You should see output similar to the following: -If you want to build with Ninja, use `build/debug-clang-dynamic-ninja` as a "Generation path" and add `-G Ninja` into "CMake options": + ```output + ./compile_commands.json + ./build/compilecmds-clang-dynamic-arm64-ninja/compile_commands.json + ./build/compilecmds-clang-dynamic-arm64-ninja/compile_commands/combined_raw/compile_commands.json + ./build/compilecmds-clang-dynamic-arm64-ninja/compile_commands/yb_postprocessed/compile_commands.json + ./build/compilecmds-clang-dynamic-arm64-ninja/compile_commands/yb_raw/compile_commands.json + ./build/compilecmds-clang-dynamic-arm64-ninja/compile_commands/combined_postprocessed/compile_commands.json + ./build/compilecmds-clang-dynamic-arm64-ninja/compile_commands/pg_raw/compile_commands.json + ./build/compilecmds-clang-dynamic-arm64-ninja/compile_commands/pg_postprocessed/compile_commands.json + ./build/compilecmds-clang-dynamic-arm64-ninja/postgres_build/compile_commands.json + ./build/compilecmds-clang-dynamic-arm64-ninja/postgres_build/contrib/compile_commands.json + ./build/compilecmds-clang-dynamic-arm64-ninja/postgres_build/third-party-extensions/compile_commands.json + ``` -![Clion Ninja options](/images/contribute/clion-cmake-options-ninja.png) +1. If `./compile_commands.json` is not there, then make a symlink to the one in the build folder as follows: -##### Setting CMake preferences when using Make + ```sh + ln -s build/compilecmds-clang-dynamic-arm64-ninja/compile_commands/combined_postprocessed/compile_commands.json compile_commands.json + ``` -Select `build/debug-clang-dynamic` as the **Generation path** in **Preferences > Build, Execution, Deployment/CMake”**, and do not specify anything for **CMake options**. + Replace `compilecmds-clang-dynamic-arm64-ninja` as appropriate. -![CLion Make options](/images/contribute/clion-cmake-options.png) +1. Close the CLion project. 
-#### Reloading the project +1. If you previously opened the folder in CLion, delete the `.idea` folder in the `yugabyte-db` folder: -Use **"File / Reload CMake Project"**. CLion will start updating symbols, which also can take a while. + ```sh + rm -r .idea + ``` -#### Doing the build from CLion +1. Re-open the `yugabyte-db` folder in CLion. -Run from the command line inside project root outside CLion (omit `YB_USE_NINJA=0` if you want to use Ninja): - -```sh -YB_USE_NINJA=0 ./yb_build.sh -``` - -Subsequent builds can be launched also from CLion. +1. When prompted to open the folder as a CMake project or as a Compilation Database project, choose **Compilation Database project**. diff --git a/docs/content/preview/contribute/docs/_index.md b/docs/content/preview/contribute/docs/_index.md index d9d3fbf357ac..e0ecba54f451 100644 --- a/docs/content/preview/contribute/docs/_index.md +++ b/docs/content/preview/contribute/docs/_index.md @@ -3,7 +3,7 @@ title: Contribute to the documentation headerTitle: Contribute to the documentation linkTitle: Documentation description: Contribute to the documentation -image: /images/section_icons/index/quick_start.png +image: fa-light fa-books headcontent: How to contribute to the YugabyteDB documentation menu: preview: @@ -13,63 +13,36 @@ menu: type: indexpage --- - + {{}} + + {{}} + + {{}} + +{{}} From 3d59575dff2c1df4d58abcbc1133d66a67ee684a Mon Sep 17 00:00:00 2001 From: Aishwarya Chakravarthy Date: Fri, 13 Sep 2024 11:00:57 -0400 Subject: [PATCH 03/75] [docs] changed flag "enable_pg_parity_early_access" to "enable_pg_parity_tech_preview" in stable (#23909) * changed flag * more changes * revert edits * removed a release note --- .../preview/reference/configuration/yugabyted.md | 6 +++--- docs/content/preview/releases/ybdb-releases/v2.21.md | 4 ++-- docs/content/preview/releases/ybdb-releases/v2024.1.md | 1 - .../ysql-language-features/postgresql-compatibility.md | 4 ++-- .../stable/reference/configuration/yugabyted.md | 10 
+++++----- 5 files changed, 12 insertions(+), 13 deletions(-) diff --git a/docs/content/preview/reference/configuration/yugabyted.md b/docs/content/preview/reference/configuration/yugabyted.md index e5150f2f05e2..a5312d329d14 100644 --- a/docs/content/preview/reference/configuration/yugabyted.md +++ b/docs/content/preview/reference/configuration/yugabyted.md @@ -751,7 +751,7 @@ For on-premises deployments, consider racks as zones to treat them as fault doma : Enable or disable the backup daemon with yugabyted start. Default: `false` : If you start a cluster using the `--backup_daemon` flag, you also need to download and extract the [YB Controller release](https://downloads.yugabyte.com/ybc/2.1.0.0-b9/ybc-2.1.0.0-b9-linux-x86_64.tar.gz) to the yugabyte-{{< yb-version version="preview" >}} release directory. ---enable_pg_parity_tech_preview *PostgreSQL-compatibilty* +--enable_pg_parity_early_access *PostgreSQL-compatibilty* : Enable Enhanced PostgreSQL Compatibility Mode. Default: `false` #### Advanced flags @@ -1240,7 +1240,7 @@ To create a secure multi-zone cluster: ```sh ./bin/yugabyted start --secure --advertise_address= \ --cloud_location=aws.us-east-1.us-east-1a \ - --fault_tolerance=zone + --fault_tolerance=zone ``` 1. 
Create certificates for the second and third virtual machine (VM) for SSL and TLS connection, as follows: @@ -1614,7 +1614,7 @@ To create the read replica cluster, do the following: --base_dir=$HOME/yugabyte-{{< yb-version version="preview" >}}/node6 \ --cloud_location=aws.us-east-1.us-east-1e \ --read_replica - + ./bin/yugabyted start \ --advertise_address=127.0.0.7 \ --join=127.0.0.1 \ diff --git a/docs/content/preview/releases/ybdb-releases/v2.21.md b/docs/content/preview/releases/ybdb-releases/v2.21.md index 02023544a3cd..a46ed4530786 100644 --- a/docs/content/preview/releases/ybdb-releases/v2.21.md +++ b/docs/content/preview/releases/ybdb-releases/v2.21.md @@ -345,12 +345,12 @@ Converted the `ysql_skip_row_lock_for_update` to an auto-flag to resolve compati We're pleased to announce the tech preview of the new Enhanced Postgres Compatibility Mode in the 2.21.0.0 release. This mode enables you to take advantage of many new improvements in both PostgreSQL compatibility and performance parity, making it even easier to lift and shift your applications from PostgreSQL to YugabyteDB. When this mode is turned on, YugabyteDB uses the [Read-Committed](../../../architecture/transactions/read-committed/) isolation mode, the [Wait-on-Conflict](../../../architecture/transactions/concurrency-control/#wait-on-conflict) concurrency mode for predictable P99 latencies, and the new Cost Based Optimizer that takes advantage of the distributed storage layer architecture and includes query pushdowns, LSM indexes, and [batched nested loop joins](../../../explore/ysql-language-features/join-strategies/#batched-nested-loop-join-bnl) to offer PostgreSQL-like performance. -You can enable the compatibility mode by passing the `enable_pg_parity_tech_preview` flag to [yugabyted](../../../reference/configuration/yugabyted/#yugabyted), when bringing up your cluster. 
+You can enable the compatibility mode by passing the `enable_pg_parity_early_access` flag to [yugabyted](../../../reference/configuration/yugabyted/#yugabyted), when bringing up your cluster. For example, from your YugabyteDB home directory, run the following command: ```sh -./bin/yugabyted start --enable_pg_parity_tech_preview +./bin/yugabyted start --enable_pg_parity_early_access ``` Note: When enabling the cost models, ensure that packed row for colocated tables is enabled by setting the `--ysql_enable_packed_row_for_colocated_table` flag to true. diff --git a/docs/content/preview/releases/ybdb-releases/v2024.1.md b/docs/content/preview/releases/ybdb-releases/v2024.1.md index b742ec855a64..902c97cdbdd1 100644 --- a/docs/content/preview/releases/ybdb-releases/v2024.1.md +++ b/docs/content/preview/releases/ybdb-releases/v2024.1.md @@ -447,7 +447,6 @@ Rolling back to the pre-upgrade version if you're not satisfied with the upgrade * Directly enables `yb_enable_read_committed_isolation` and `ysql_enable_read_request_caching` on yb-master and yb-tserver processes. {{}} * Simplifies yugabyted by dropping Python2 support and transitioning the script to use Python3, replacing deprecated distutils package with shutil. {{}},{{}} -* Changes the flag name for enabling PostgreSQL feature parity from `enable_pg_parity_tech_preview` to `enable_pg_parity_early_access` in yugabyted. 
{{}} ### Bug fixes diff --git a/docs/content/stable/explore/ysql-language-features/postgresql-compatibility.md b/docs/content/stable/explore/ysql-language-features/postgresql-compatibility.md index dd820aebdd61..b71ded0bc0b2 100644 --- a/docs/content/stable/explore/ysql-language-features/postgresql-compatibility.md +++ b/docs/content/stable/explore/ysql-language-features/postgresql-compatibility.md @@ -133,12 +133,12 @@ Enables the use of PostgreSQL [parallel queries](https://www.postgresql.org/docs To enable EPCM in YugabyteDB: -- Pass the `enable_pg_parity_early_access` flag to [yugabyted](../../../reference/configuration/yugabyted/) when starting your cluster. +- Pass the `enable_pg_parity_tech_preview` flag to [yugabyted](../../../reference/configuration/yugabyted/) when starting your cluster. For example, from your YugabyteDB home directory, run the following command: ```sh -./bin/yugabyted start --enable_pg_parity_early_access +./bin/yugabyted start --enable_pg_parity_tech_preview ``` Note: When enabling the cost models, ensure that packed row for colocated tables is enabled by setting the `--ysql_enable_packed_row_for_colocated_table` flag to true. 
diff --git a/docs/content/stable/reference/configuration/yugabyted.md b/docs/content/stable/reference/configuration/yugabyted.md index 443c4383e674..fb928c865aee 100644 --- a/docs/content/stable/reference/configuration/yugabyted.md +++ b/docs/content/stable/reference/configuration/yugabyted.md @@ -324,13 +324,13 @@ Enable point-in-time recovery for a database: Disable point-in-time recovery for a database: ```sh -./bin/yugabyted configure point_in_time_recovery --disable --database +./bin/yugabyted configure point_in_time_recovery --disable --database ``` Display point-in-time schedules configured on the cluster: ```sh -./bin/yugabyted configure point_in_time_recovery --status +./bin/yugabyted configure point_in_time_recovery --status ``` #### admin_operation @@ -747,7 +747,7 @@ For on-premises deployments, consider racks as zones to treat them as fault doma : Enable or disable the backup daemon with yugabyted start. Default: `false` : If you start a cluster using the `--backup_daemon` flag, you also need to download and extract the [YB Controller release](https://downloads.yugabyte.com/ybc/2.1.0.0-b9/ybc-2.1.0.0-b9-linux-x86_64.tar.gz) to the yugabyte-{{< yb-version version="stable" >}} release directory. ---enable_pg_parity_early_access *PostgreSQL-compatibilty* +--enable_pg_parity_tech_preview *PostgreSQL-compatibilty* : Enable Enhanced PostgreSQL Compatibility Mode. Default: `false` #### Advanced flags @@ -1236,7 +1236,7 @@ To create a secure multi-zone cluster: ```sh ./bin/yugabyted start --secure --advertise_address= \ --cloud_location=aws.us-east-1.us-east-1a \ - --fault_tolerance=zone + --fault_tolerance=zone ``` 1. 
Create certificates for the second and third virtual machine (VM) for SSL and TLS connection, as follows: @@ -1610,7 +1610,7 @@ To create the read replica cluster, do the following: --base_dir=$HOME/yugabyte-{{< yb-version version="stable" >}}/node6 \ --cloud_location=aws.us-east-1.us-east-1e \ --read_replica - + ./bin/yugabyted start \ --advertise_address=127.0.0.7 \ --join=127.0.0.1 \ From ec951f862757bfb9fb198373a5b0cae4160391ab Mon Sep 17 00:00:00 2001 From: Charles Wang Date: Fri, 13 Sep 2024 00:59:56 +0000 Subject: [PATCH 04/75] [PLAT-14590] Fix permission check for otel-collector file existence check Summary: Fix issue where on-prem node will be stuck in `Decommissioned` state due to incorrect removal/stopping of otel collector service during node cleanup. We assume that otel-collector is running only on user systemd. Test Plan: Create 6 node rf3 on-prem universe. Enable audit log export. Validate that `yb-server-ctl.sh` is updated with the new changes containing otel-collector. Perform a replace or remove -> release action. Make sure that the node instance that is removed from the universe moves from `USED` -> `FREE` state. Reviewers: sanketh, nsingh, yshchetinin Reviewed By: nsingh Subscribers: yugaware Differential Revision: https://phorge.dev.yugabyte.com/D37276 --- .../opscli/ybops/cloud/onprem/method.py | 19 +++--- .../templates/yb-server-ctl.sh.j2 | 14 ++++ .../tasks/install-otel-col.yml | 10 +++ managed/devops/yb-server-ctl.yml | 67 +++++++------------ 4 files changed, 57 insertions(+), 53 deletions(-) diff --git a/managed/devops/opscli/ybops/cloud/onprem/method.py b/managed/devops/opscli/ybops/cloud/onprem/method.py index a66b8a622f74..609e11231e8a 100644 --- a/managed/devops/opscli/ybops/cloud/onprem/method.py +++ b/managed/devops/opscli/ybops/cloud/onprem/method.py @@ -168,14 +168,14 @@ def callback(self, args): # First stop both tserver and master processes. 
processes = ["tserver", "master", "controller"] - logging.info(("[app] Running control script to stop " + - "against master, tserver and controller at {}").format(host_info['name'])) - self.cloud.run_control_script(processes[0], "stop-destroy", args, - self.extra_vars, host_info) - self.cloud.run_control_script(processes[1], "stop-destroy", args, - self.extra_vars, host_info) - self.cloud.run_control_script(processes[2], "stop-destroy", args, - self.extra_vars, host_info) + if args.clean_otel_collector and args.provisioning_cleanup: + processes.append("otel-collector") + + for process in processes: + logging.info(("[app] Running control script to stop {} at {}") + .format(process, host_info['name'])) + self.cloud.run_control_script(process, "stop-destroy", args, + self.extra_vars, host_info) # Revert the force using of user yugabyte. args.ssh_user = ssh_user @@ -186,8 +186,7 @@ def callback(self, args): "platform-services", "remove-services", args, self.extra_vars, host_info) # Run non-db related tasks. 
- if ((args.clean_node_exporter or args.clean_otel_collector) - and args.provisioning_cleanup): + if args.clean_node_exporter and args.provisioning_cleanup: logging.info(("[app] Running control script remove-services " + "against thirdparty services at {}").format(host_info['name'])) self.cloud.run_control_script( diff --git a/managed/devops/roles/configure-cluster-server/templates/yb-server-ctl.sh.j2 b/managed/devops/roles/configure-cluster-server/templates/yb-server-ctl.sh.j2 index 2454900476c7..3ac9d5fa6f88 100644 --- a/managed/devops/roles/configure-cluster-server/templates/yb-server-ctl.sh.j2 +++ b/managed/devops/roles/configure-cluster-server/templates/yb-server-ctl.sh.j2 @@ -32,6 +32,7 @@ Daemons: master tserver controller + otel-collector Commands: create - Start the YB process on this node in cluster creation node (only applicable for master) @@ -144,6 +145,9 @@ clean_data_paths() { if [ "$daemon" == "controller" ]; then rm -rf "${MOUNT_PATHS[i]}"/ybc-data fi + if [ "$daemon" == "otel-collector" ]; then + rm -rf "${MOUNT_PATHS[i]}"/otel-collector + fi done print_err_out "Cleaning core files on `hostname`" @@ -259,6 +263,16 @@ case "$daemon" in exit 1 fi ;; + otel-collector) + if [ "$command" == "create" ]; then + echo "create command is not valid for otel-collector" + exit 1 + fi + if [ "$command" == "start" || "$command" == "stop" ]; then + echo "stop and start command is not valid for otel-collector as is systemd only" + exit 1 + fi + ;; *) echo "Invalid Daemon: $daemon" print_help diff --git a/managed/devops/roles/manage_otel_collector/tasks/install-otel-col.yml b/managed/devops/roles/manage_otel_collector/tasks/install-otel-col.yml index 3d97a6836ea8..d351525b67e2 100644 --- a/managed/devops/roles/manage_otel_collector/tasks/install-otel-col.yml +++ b/managed/devops/roles/manage_otel_collector/tasks/install-otel-col.yml @@ -76,6 +76,16 @@ state: link force: yes +- name: Configure | Update yb server ctl script + vars: + mount_paths: "{{ _mount_points | 
join(' ') }}" + yb_cores_dir: "{{ yb_home_dir }}/cores" + template: + src: "roles/configure-cluster-server/templates/yb-server-ctl.sh.j2" + dest: "{{ yb_home_dir }}/bin/yb-server-ctl.sh" + owner: "{{ user_name }}" + mode: 0755 + - name: Install OpenTelemetry collector | Check logs cleanup script exists stat: path: "{{ yb_home_dir }}/bin/zip_purge_yb_logs.sh" diff --git a/managed/devops/yb-server-ctl.yml b/managed/devops/yb-server-ctl.yml index e032715ec04e..e022fc5afde0 100644 --- a/managed/devops/yb-server-ctl.yml +++ b/managed/devops/yb-server-ctl.yml @@ -48,41 +48,44 @@ - name: Set the systemd prefix set_fact: systemd_prefix: "{{ 'systemctl --user' if systemd_user.stat.exists else 'sudo systemctl'}}" + - name: Set systemd name + set_fact: + systemd_process: "{{ process if process == 'otel-collector' else 'yb-' ~ process }}" - name: Print systemd_prefix debug: var: systemd_prefix - block: - name: Reload daemon shell: "{{ systemd_prefix }} daemon-reload" - - name: Enable the {{ process }} - shell: "{{ systemd_prefix }} enable yb-{{ process }}" + - name: Enable the {{ systemd_process }} + shell: "{{ systemd_prefix }} enable {{ systemd_process }}" when: command == 'start' - - name: Perform {{ command }} on the {{ process }} - shell: "{{ systemd_prefix }} {{ command }} yb-{{ process }}" + - name: Perform {{ command }} on the {{ systemd_process }} + shell: "{{ systemd_prefix }} {{ command }} {{ systemd_process }}" when: command == 'start' or command == 'stop' - - name: Disable the {{ process }} - shell: "{{ systemd_prefix }} disable yb-{{ process }}" + - name: Disable the {{ systemd_process }} + shell: "{{ systemd_prefix }} disable {{ systemd_process }}" when: command == 'stop' - name: Reload daemon shell: "{{ systemd_prefix }} daemon-reload" when: command != "stop-destroy" - - name: Stop the {{ process }} process with systemd on destroy + - name: Stop the {{ systemd_process }} process with systemd on destroy block: - name: Reload daemon shell: "{{ systemd_prefix }} 
daemon-reload" - - name: yb-{{ process }} status output - shell: "{{ systemd_prefix }} status yb-{{ process }}" + - name: "{{ systemd_process }} status output" + shell: "{{ systemd_prefix }} status {{ systemd_process }}" register: systemd_status_output ignore_errors: True - name: Print systemd_status_output.rc debug: msg: "{{ systemd_status_output.rc }}" - - name: Perform stop on the {{ process }} - shell: "{{ systemd_prefix }} stop yb-{{ process }}" + - name: Perform stop on the {{ systemd_process }} + shell: "{{ systemd_prefix }} stop {{ systemd_process }}" when: systemd_status_output.rc != 4 - - name: Disable the {{ process }} - shell: "{{ systemd_prefix }} disable yb-{{ process }}" + - name: Disable the {{ systemd_process }} + shell: "{{ systemd_prefix }} disable {{ systemd_process }}" when: systemd_status_output.rc != 4 - name: Reload daemon shell: "{{ systemd_prefix }} daemon-reload" @@ -107,13 +110,9 @@ - block: - set_fact: prometheus_systemd_unit_dir: "/lib/systemd/system" - otel_systemd_dir: "/etc/systemd/system" - set_fact: prometheus_systemd_unit_dir: "/usr/lib/systemd/system" when: ansible_os_family == "Suse" - - set_fact: - otel_systemd_dir: "{{ yb_home_dir }}/.config/systemd/user" - when: ansible_os_family != 'RedHat' or (ansible_distribution_major_version != '7' and not (ansible_distribution == 'Amazon' and ansible_distribution_major_version == '2')) - block: - name: Check node exporter service exists @@ -140,37 +139,11 @@ become_method: sudo when: clean_node_exporter is defined and clean_node_exporter|bool - - block: - - name: Check otel collector service exists - stat: - path: "{{ otel_systemd_dir }}/otel-collector.service" - register: otel_collector_stat - - name: Log otel_collector_stat - debug: - var: otel_collector_stat - - name: Stop otel collector service - service: - enabled: yes - name: otel-collector - state: stopped - become: yes - become_method: sudo - when: otel_collector_stat.stat.exists - - - name: Delete otel collector service - 
file: - path: "{{ otel_systemd_dir }}/otel-collector.service" - state: absent - become: yes - become_method: sudo - when: clean_otel_collector is defined and clean_otel_collector|bool - - name: Perform daemon-reload for removed services shell: cmd: "systemctl daemon-reload" become: yes become_method: sudo - when: process == "thirdparty" and command == "remove-services" - name: Removing platform services @@ -266,6 +239,14 @@ become: yes become_method: sudo + - name: Delete otel collector service + file: + path: "{{ systemd_dir }}/otel-collector.service" + state: absent + become: yes + become_method: sudo + when: clean_otel_collector is defined and clean_otel_collector|bool + - name: Perform daemon-reload for removed services shell: cmd: "systemctl daemon-reload" From 77f1c3a3ba5946f82a2a34b7b31418476e647304 Mon Sep 17 00:00:00 2001 From: Charles Wang Date: Mon, 9 Sep 2024 23:34:17 +0000 Subject: [PATCH 05/75] [PLAT-15212] Failover local provider test should validate safetime is restored during failover Summary: Modify DR failover local provider test to make sure safetime is restored as expected. We simulate a row insert after the safetime we get. When we restore to this safetime, it will not contain this row as expected. Need to wait for https://phorge.dev.yugabyte.com/D37310 for fix Test Plan: Local provider test succeeds. 
Reviewers: hzare Reviewed By: hzare Subscribers: yugaware Differential Revision: https://phorge.dev.yugabyte.com/D37921 --- .../commissioner/tasks/local/DRDbScopedLocalTest.java | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/local/DRDbScopedLocalTest.java b/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/local/DRDbScopedLocalTest.java index 57055d69cec4..05ce54c5a707 100644 --- a/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/local/DRDbScopedLocalTest.java +++ b/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/local/DRDbScopedLocalTest.java @@ -462,6 +462,11 @@ public void testDbScopedFailoverRestart() throws InterruptedException { assertEquals(dbs.size(), targetDbIds.size()); assertEquals(targetDbIds.size(), safeTimeResp.safetimes.size()); + // Insert new rows that will not be restored by PITR safetime as we have already + // gotten the safetime earlier. + insertRow(sourceUniverse, tables.get(0), Map.of("id", "8", "name", "'val8'")); + validateRowCount(targetUniverse, tables.get(0), 2 /* expectedRows */); + // Failover DR config. DrConfigFailoverForm drFailoverForm = new DrConfigFailoverForm(); drFailoverForm.primaryUniverseUuid = targetUniverse.getUniverseUUID(); @@ -539,6 +544,11 @@ public void testDbScopedFailoverChangeReplica() throws InterruptedException { assertEquals(dbs.size(), targetDbIds.size()); assertEquals(targetDbIds.size(), safeTimeResp.safetimes.size()); + // Insert new rows that will not be restored by PITR safetime as we have already + // gotten the safetime earlier. + insertRow(sourceUniverse, tables.get(0), Map.of("id", "8", "name", "'val8'")); + validateRowCount(targetUniverse, tables.get(0), 2 /* expectedRows */); + // Failover DR config. 
DrConfigFailoverForm drFailoverForm = new DrConfigFailoverForm(); drFailoverForm.primaryUniverseUuid = targetUniverse.getUniverseUUID(); From 04a36f8ac8d96699fef633635ba8e498dbbb1d69 Mon Sep 17 00:00:00 2001 From: Aishwarya Chakravarthy Date: Fri, 13 Sep 2024 13:22:15 -0400 Subject: [PATCH 06/75] [DOC-444] [2.23] Lightweight DB clone (#23515) * diagram pending * landing page * some edits and yugabyted equivalent * Apply suggestions from code review Co-authored-by: Dwight Hodge <79169168+ddhodge@users.noreply.github.com> * updates from review * added limitations * Apply suggestions from code review Co-authored-by: Dwight Hodge <79169168+ddhodge@users.noreply.github.com> * changes from review * minor edits * added extra detail and yugabyted commands * expanded the example as per suggesstions * renamed file * review comments --------- Co-authored-by: Dwight Hodge <79169168+ddhodge@users.noreply.github.com> Co-authored-by: Dwight Hodge --- .../preview/manage/backup-restore/_index.md | 6 + .../backup-restore/instant-db-cloning.md | 290 ++++++++++++++++++ .../images/manage/backup-restore/db-clone.png | Bin 0 -> 36545 bytes 3 files changed, 296 insertions(+) create mode 100644 docs/content/preview/manage/backup-restore/instant-db-cloning.md create mode 100644 docs/static/images/manage/backup-restore/db-clone.png diff --git a/docs/content/preview/manage/backup-restore/_index.md b/docs/content/preview/manage/backup-restore/_index.md index ea98923f63e3..23fcf4b2be8d 100644 --- a/docs/content/preview/manage/backup-restore/_index.md +++ b/docs/content/preview/manage/backup-restore/_index.md @@ -55,4 +55,10 @@ In some circumstances, a backup can fail during high DDL activity. 
Avoid perform href="point-in-time-recovery/" icon="fa-light fa-timeline-arrow">}} + {{}} + {{}} diff --git a/docs/content/preview/manage/backup-restore/instant-db-cloning.md b/docs/content/preview/manage/backup-restore/instant-db-cloning.md new file mode 100644 index 000000000000..e7234ac2574d --- /dev/null +++ b/docs/content/preview/manage/backup-restore/instant-db-cloning.md @@ -0,0 +1,290 @@ +--- +title: Instant database cloning +headerTitle: Instant database cloning +linkTitle: Instant database cloning +description: Clone your database in YugabyteDB for data recovery, development, and testing. +badges: tp +menu: + preview: + identifier: instant-db-clone + parent: backup-restore + weight: 706 +type: docs +--- + +Instant database cloning in YugabyteDB allows you to quickly create a zero-copy, independent writable clone of your database that can be used for data recovery, development, and testing. Cloning is both fast and efficient because when initially created, it shares the same data files with the original database. Subsequently, as data is written to the clone, the clone stores its own changes as separate and independent delta files. Although they physically share some files, the two databases are logically isolated, which means you can freely play with the clone database, perform DDLs, read and write data, and delete it without affecting the original database. + +You can create clones as of now, or as of any time in the recent past, within a configurable history retention period. This is particularly useful for data recovery from user or application errors. + +![Database clone](/images/manage/backup-restore/db-clone.png) + +Cloning has two main use cases: + +- Data recovery. To recover from data loss due to user error (for example, accidentally dropping a table) or application error (for example, updating rows with corrupted data), you can create a clone of your production database from a point in time when the database was in a good state. 
This allows you to perform forensic analysis, export the lost or corrupted data from the clone, and import it back to the original database. For instance, if you dropped a table by mistake at 9:01, then detected this error at 10:45, you want to recover the lost data as it was at 9:00 (just before the table drop). At the same time, you don't want to lose any new data added to other tables between 9:01 and 10:45. With database cloning, you can create a clone of the database as of 9:00 (before the table drop) and copy the data in the table from the cloned database to the production database. + +- Development and testing. Because the two databases are completely isolated, you can experiment with the cloned database, perform DDL operations, read and write data, and delete the clone without impacting the original. Developers can test their changes on an identical copy of the production database without affecting its performance. + +## Enable database cloning + +To enable database cloning in a cluster, set the yb-master flag `enable_db_clone` to true. Because cloning is in {{}}, you must also add the `enable_db_clone` flag to the [allowed_preview_flags_csv](../../../reference/configuration/yb-master/#allowed-preview-flags-csv) list. 
+ +For example, to set these flags when creating a cluster using yugabyted, use the `--master_flags` option of the [start](../../../reference/configuration/yugabyted/#start) command as follows: + +```sh +--master_flags "allowed_preview_flags_csv={enable_db_clone},enable_db_clone=true" +``` + +You can also set the runtime flags while the yb-master process is running using the yb-ts-cli [set_flag](../../../admin/yb-ts-cli/#set-flag) command as follows: + +```sh +./bin/yb-ts-cli --server-address=master_host:7100 set_flag allowed_preview_flags_csv enable_db_clone +./bin/yb-ts-cli --server-address=127.0.0.1:7100 set_flag enable_db_clone true +``` + +## Clone databases + +### Prerequisites + +- [Create a snapshot schedule](../../../manage/backup-restore/point-in-time-recovery/#create-a-schedule) for the database you want to clone. + + For example, creating a snapshot schedule with retention period of 7 days allows you to create a clone of the original database to any time in the past 7 days. + +- You have to trust local YSQL connections (that use UNIX domain sockets) in the [host-based authentication](../../../secure/authentication/host-based-authentication/). You have to do this for all YB-TServers in the cluster. You can do this when starting the YB-TServer process by adding the authentication line `local all all trust` to the [ysql_hba_conf_csv](../../../reference/configuration/yb-tserver/#ysql-hba-conf-csv) flag. + + For example, if you are using yugabyted you can use the `--tserver_flags` option of the `start` command as follows: + + ```sh + --tserver_flags "ysql_hba_conf_csv={host all all 0.0.0.0/0 trust,local all all trust}" + ``` + +{{}} +Do not override your default host-based authentication rules when trusting the local connection. You may need to add additional authentication lines to `ysql_hba_conf_csv` based on your specific configuration. For more information, see [host-based authentication](../../../secure/authentication/host-based-authentication/). 
+{{}} + +### Clone a YSQL database + +Because YugabyteDB is PostgreSQL compatible, you can create a database as a clone of another using the `TEMPLATE` SQL option of `CREATE DATABASE` command as follows: + +```sql +CREATE DATABASE clone_db TEMPLATE original_db; +``` + +In this example, `clone_db` is created as a clone of `original_db`, and contains the latest schema and data of `original_db` as of current time. + +To create a clone of the original database at a specific point in time (within the history retention period specified when creating the snapshot schedule), you can specify the [Unix timestamp](https://www.unixtimestamp.com/) in microseconds using the `AS OF` option as follows: + +```sql +CREATE DATABASE clone_db TEMPLATE original_db AS OF 1723146703674480; +``` + +### Clone a YCQL keyspace + +You can create a clone in YCQL using the yb-admin `clone_namespace` command as follows: + +```sh +./bin/yb-admin --master_addresses $MASTERS clone_namespace ycql.originaldb1 clonedb2 1715275616599020 +``` + +In this example, `clonedb2` is created as a clone of `originaldb1` as of 1715275616599020 Unix timestamp. 
+ +### Check the clone status + +To check the status of clone operations performed on a database, use the yb-admin `list_clones` command and provide the `source_database_id` (YSQL) or `source_namespace_id` (YCQL), as follows: + +```sh +./bin/yb-admin --master_addresses $MASTERS list_clones 00004000000030008000000000000000 +``` + +```output +[ + { + "aggregate_state": "COMPLETE", + "source_namespace_id": "00004000000030008000000000000000", + "seq_no": "1", + "target_namespace_name": "testing_clone_db", + "restore_time": "2024-08-09 21:42:16.451974" + }, + { + "aggregate_state": "COMPLETE", + "source_namespace_id": "00004000000030008000000000000000", + "seq_no": "2", + "target_namespace_name": "dev_clone_db", + "restore_time": "2024-08-09 21:42:55.048663" + } +] +``` + +You can find the `source_database_id` or `source_namespace_id` from the [YB-Master leader UI](../../../reference/configuration/default-ports/#servers) under the `/namespaces` endpoint. + +In this example, two clones were made of the source database `00004000000030008000000000000000` that are COMPLETE. The two clones are `testing_clone_db` and `dev_clone_db` and they each have a unique `seq_no` used to identify each clone operation from the same source database. + +You can check the status of a specific clone operation if you have both the `source_database_id` (YSQL) or `source_namespace_id`(YCQL) and the `seq_no` as follows: + +```sh +./bin/yb-admin --master_addresses $MASTERS list_clones 00004000000030008000000000000000 2 +``` + +```output +[ + { + "aggregate_state": "COMPLETE", + "source_namespace_id": "00004000000030008000000000000000", + "seq_no": "2", + "target_namespace_name": "dev_clone_db", + "restore_time": "2024-08-09 21:42:55.048663" + } +] +``` + +Use the `list_clones` command to check whether a clone operation completed successfully or not. + +Note that the cluster doesn't allow you to perform two clone operations concurrently on the same source database. 
You have to wait for the first clone to finish until you can perform another clone. + +### Example + +The following example demonstrates how to use a database clone to recover from an accidental table deletion. + +1. Create a local cluster using [yugabyted](../../../reference/configuration/yugabyted/): + + ```sh + ./bin/yugabyted start --advertise_address=127.0.0.1 \ + --master_flags "allowed_preview_flags_csv={enable_db_clone},enable_db_clone=true" \ + --tserver_flags "ysql_hba_conf_csv={host all all 0.0.0.0/0 trust,local all all trust}" + ``` + +1. Start [ysqlsh](../../../admin/ysqlsh/) and create the database: + + ```sh + ./bin/ysqlsh + CREATE DATABASE production_db; + ``` + +1. Create a snapshot schedule that produces a snapshot once a day (every 1,440 minutes), and retains it for three days (4,320 minutes): + + ```sh + ./bin/yb-admin --master_addresses ip1:7100,ip2:7100,ip3:7100 create_snapshot_schedule 1440 4320 ysql.production_db + ``` + +1. Create two tables `t1` and `t2`, and add some data: + + ```sql + ./bin/ysqlsh + yugabyte=# \c production_db; + production_db=# CREATE TABLE t1 (k INT, v INT); + production_db=# INSERT INTO t1 (k,v) SELECT i,i%2 FROM generate_series(1,5) AS i; + production_db=# SELECT * FROM t1 ORDER BY k; + ``` + + ```output + k | v + ---+--- + 1 | 1 + 2 | 0 + 3 | 1 + 4 | 0 + 5 | 1 + (5 rows) + ``` + + ```sql + production_db=# CREATE TABLE t2 (key INT, c1 TEXT); + production_db=# INSERT INTO t2 (key,c1) SELECT i,md5(random()::text) FROM generate_series(1,5) AS i; + production_db=# SELECT * FROM t2 ORDER BY key; + ``` + + ```output + key | c1 + -----+---------------------------------- + 1 | 450e6c49f86c76d944375e29e48f2dee + 2 | b934a3bdf7438458a85b0858c41f731c + 3 | 08697ed89ec387e714c6587e522d7a7e + 4 | a879ff99872b3c3433803d3c3229f0cf + 5 | 4d46a53780a7a348179e1af9b692e95e + (5 rows) + ``` + +1. Determine the exact time when your database is in the correct state. 
This timestamp will be used to create a clone of the production database from the point when it was in the desired state. Execute the following SQL query to retrieve the current time in UNIX timestamp format: + + ```sql + production_db=# SELECT (EXTRACT (EPOCH FROM CURRENT_TIMESTAMP)*1000000)::decimal(38,0); + ``` + + ```output + numeric + ------------------ + 1723243720285350 + (1 row) + ``` + +1. To simulate a user error, drop the table `t1`. + + ```sql + production_db=# DROP TABLE t1; + ``` + + ```output + DROP TABLE + ``` + +1. Meanwhile, as table `t2` is still accepting reads/writes, insert 2 more rows as follows: + + ```sh + INSERT INTO t2 (key,c1) SELECT i,md5(random()::text) FROM generate_series(6,7) AS i; + ``` + +1. Now, if you try to query table `t1`, notice that the table is dropped and there is no way you can query it. + + ```sql + production_db=# SELECT * FROM t1 ORDER BY k; + ``` + + ```output + ERROR: relation "t1" does not exist + LINE 1: SELECT * FROM t1 ORDER BY k; + ``` + +1. Create a database `clone_db` using `production_db` as the template and using the timestamp generated in step 4. + + ```sql + production_db=# CREATE DATABASE clone_db TEMPLATE production_DB AS OF 1723243720285350; + ``` + + ```sql + \c clone_db + ``` + + ```output + You are now connected to database "clone_db" as user "yugabyte". + ``` + + ```sql + clone_db=# SELECT * FROM t1 ORDER BY k; + ``` + + ```output + k | v + ---+--- + 1 | 1 + 2 | 0 + 3 | 1 + 4 | 0 + 5 | 1 + (5 rows) + ``` + + You now have two isolated databases that can serve reads and writes independently. `clone_db` contains all the data from `production_db` at the specified timestamp, which means you can read table `t1` that was dropped. To recover the lost data, copy the data from table `t1` back to `production_db` by exporting the data from the clone and importing it into `production_db`. Alternatively, you can switch the workload to `clone_db`. 
+ +When you are done, you can clean up by dropping the clone as you would any database, by using the DROP DATABASE or DROP KEYSPACE command. The clone is deleted, along with any post-compaction uncompacted files from the original database. + +## Best practices + +Although creating a clone database is quick and initially doesn't take up much added disk space as no data is copied, a clone does create an independent set of logical tablets. Increasing the number of tablets can cause: + +- Higher CPU usage due to the additional tablets +- Increased memory consumption from the extra tablets +- Increased disk use after compaction of either the clone or the original database. This is because both original and post-compaction data files must be kept on disk for access by whichever database did not do the compaction. For example, if compaction is performed on the original database, new compacted files are generated which serve reads for the original database. The old data files are retained on disk to serve reads for the clone database. Whenever the clone or original database is deleted, the cluster only cleans the unused data files. + +## Limitations + +- Cloning is not currently supported for databases that use sequences. See GitHub issue [21467](https://github.com/yugabyte/yugabyte-db/issues/21467) for tracking. +- Cloning to a time before dropping Materialized views is not currently supported. See GitHub issue [23740](https://github.com/yugabyte/yugabyte-db/issues/23740) for tracking. 
diff --git a/docs/static/images/manage/backup-restore/db-clone.png b/docs/static/images/manage/backup-restore/db-clone.png new file mode 100644 index 0000000000000000000000000000000000000000..6c82066d9d5cee921dd49c42b64206c5e45d676f GIT binary patch literal 36545 zcmeFYWmuGL*FOqKNeH46BHfLo^ni$jgot#AFdz&=58VieNS8>bA`Cs$Fd%|-NDdv+ zE#0s$^w#@*_w(!z`#AQ8{oy~yam`$F%~@-$bJcI16RM&1n3&)$0R{#Jv67;^CI$u$ zI|c?OJ3cP(#9)dp83Thz%Ie`m4W)+<=`|efEUaw67#ND7@j7_A&)Uh;3^W-kFt9Y` z>;e>t=~-TrgFJ59yk-WmGvEfir7_a7-*|~hUCWs@f?3MsLjEn-TlSHx9tEMxf-1ps zZL!yu*Yw2cv~SPZQdf;rCq{%h$Gq=_mg%rKNXiLCy;wqb`Om;Wh>+ZhP?= zL9+l6_4kA?*bK((O;iMi98FdPb?Te9=sJU|sW8eo6S>+kKpUL%R6%t_pE8H>N@L&f z-Nwx5@f#5p6^$fu;A|H8{+OurNqW*jpjykc&_J+I&BQx~!uMelZT?@q9B#OdUFt<# zLTkkO8!5F*@-39*%)m~;Fl1^f1#i=5(!;m?jM?FxfaR~Z{5E5#K4zM z48e6~aQWeMt%*V8^#k3wTU43z1N}jb_ukAB$bKXse6UTrCR?WwMR)t--Y9o_*z*>- zwSE?xgqVge@ojS*FQwuv*h=C~iUo1*xUcnw;cQ;)MU!~Zywot7f*Km@yGvA)P=$0&! z^nF0F{u$J)OgVUSJ~Es#h0uhQGUvU-Fb^q9GY3ri4I$RcJ7x}_AF%1KL?&N%lryiR zYA!ksa_A^^dx>cnMc^%IWJpR=+;)qv<45o#uARn2fBY}F{(hJ_CAt2RQ8=&RsD0E= z<-S`))gUYZKa2toaMO}(Fb4J{tH<~hFr;8>| zlS?7|Dtn_Yq2qCAa287dThz@9CUFW8+AdK%5xvWXCA`rsB0_vwZc6Ko3 zxAd30yNbdR!c`1+^x(dQk&NhyOpo#q51)wMi_8-*w;PwEw+!CUH20W&EDamEUr5Ve z603V{IDjT`HgjlSzP=VW`&-9ud$9Od-)8E%@-pQ`iJ=Gk54@l5-NDc6nCkOOWlLkd zX<<&zulqBM{)N4~Wa~{LOEya#4D)GE8|E21+xL5z4|9yTu5qs&LFEnn~d`Yptf^NUT zYk$GmC4H;N>H2#t3%{BS>^Oh$3t?Rn5r6m#wvlT${5HN^VZvWey=d0Hc$7vy zxcPC8XacinMne*FX*PCS@93qMRQ;{ursVB4H@2w|U#9Q)!PuSw6#fq>RodvEFvO8| zTpzz4a@}&aeDIOmV`ef~=&(G=5jX3t@;3vaVWHG-JVQUefi|}``;dj^sF$%FM`bA* zYGUTUu862ec8+llAAEEI(q>4GN^NhPzg_mm@Ch&57GpZS36l(yZ{8~nZVux(c7k+) z@UpnF@P5S_xs@!hJZnvOzVx7*H93qI#)C-kNbpEMQF!#|+mmn4SQH+85_k-TcSy$* za;9?9Jx6G5XpMfJd!GAQMa%V5nkFjyBv>I^ukXx?qXKJ;Oka3|XoF!R@UBt!)9~yT zon*LnruIh{)s0;7N0XYVxzbNjiYUYLjFUV2#>8)f<8ciF3+}6A%gwz}Kq@q_8ac&T z#90(AzU(&oKp~7oDC%qL%j$catHF}*SH zm}i9n;?%jq*$_qs%Uji2zK34<{>>TgELJO4s{$igp6Wjr>Uu>9dH-T0%M{U6MC@5C zYS?R{o}u%(N@pu?ip@LNC(5Vt!tvbW91pkJKUMZ!HM4bGn0=#;HzpUr3ktA 
zd^atj6nlqwXQ7aupK0|ef6zJ1s$j)Z5K@j^{<0ipLDH4K_+;04_xYmgqBl7!xf!>G z?x5}jB1cz%N00}tn^xsz2ZqL*Bffl|`aG`R`@zh}+{ny(I@n^)QgAe{V=iUfq;aWv8W>;cIElW8oCMT3-EsqTRe62I{duB`Kz*F@7ucafU zQ`-xQVbL5+iII{rJIS%h&msmyK6++XC9-WsIT>`R+)fIdP;IjYD#fRjU|nBy;^*^h@&20>9=IlYnDcb znm`fM-Ecc!*qBM(Ky@NLEB0SCE<>~FPTwEfdsiMg9|?Fp^AVh=ri;|$yOx5*j2UumsvI>UIQkK*^h?y2{x8Gy@Ql|P`?4f~-2s9DkbqOU z3AwzG6atpl!6CdP&?QO-w-jqNllPBDx=I%9hkjQ7+@FnPTxVpy8E*FdzNwjZL=Bx4 z7027cNUWzRpW>gAWWlr99@B+0w8*z=HA{xm+>u~9@VX~ut2t3r?YfP*tunX8GQzBQ z>jT-)tyVT|wt97fC;HKY_g&^klFaznsF?|4sWlZjLOI5towVYF8%PyF>c)>^wyRim z)!&QH{*Vt06}uIwu9@DP>dYS>^&zg7YoAc`t--eq^|_qqdHdTVb;5NNUcMKcK9OG@ zm&pC(o@DcWDf&jcke`ri-}ZSQ z^I%~{t2;j{pJ!k_kLSZdA$#?5)yk3c)LtG{p5S87a!UJ$8Zfy47gT$|W_NVg(zW|~ zQn?c^%=TbnIxO|A&h3WOM4@7b`o1%1KLwma@~ zt!dxajxX{+di1Y`Efg&z4{M+m(awe8y5l81FDJ`jCc~CR4SF{9dT@<;eK#ltZ0hN%_LliEPhE_ec}T zcHf;`$i=gy|!_OP0Vu{JfOP zqqvx?xCWT92_ISS{cQ~S0gN|?*q_2WHK->F+7IIen7(>{vFJfpObGq7ds4b|#;}#% z3(8w1@R7bGWH$B!#Z)YyfqddESXaqHRTYC1xW~u9#=MJx1KeQ(moz5z-+Kj2c8qJk zo?~HPgjiu<|IVBXZe|DNH8r<; z3FdW!+Fy->A?YR#+(N<5ru1%58(SxFHz~$nEyRKQtH*qd^uL-oL!=mWRW;}z+Bt&h zMR@sn`5C1N=;`St9nCGoHRTol=?;98VtnQ7Y%k8o=j!Up>ng}==V-}yUrbDlk6(aK zK!68m!Qul=AW9!89dyqfl$b+5C9Ifn~t?X>+uf{cfX$NzbVr0CU=ByM z|ITFV^bc78fqYlL@ZIO-=leT0&{gv4sknxf8`wrq-UW{?pun8Y(*MfqpU(gD$A3CX@?DYsKUndb&%d4m zjFu*l zKIIWsGJ1`JiA@T^_|G@?KoBt|m}+p81q+|v592@H9$;cA^J4!$^i9UV1dlF{-v5tY zzeXa}a3ub3L(=x_dDwF!C2H%<@SQW!1!>>)Rfe9$b9ec7*SbwaJ1{}+nekCY8^bRJt zDdDk-bElaH3D?anC%4eUMa9#H9}o1>tsYH&xRv96s3IseUtC^R*u@_f4+dKp)Gl7= zz1%J8KCtgN;fEQxH1Truw!z0(Q{15`H(Q16IWZXYGf2} zb$Kj@k}N8>&RJS`OCEtHR}B;gdMI#o_G_SEy(Cj|uFj6G%H~BAzS3h+bI%80@AtKc z?)Nb}DTc24BsF~KrXKEfhlG=R^G+#2l6XOqYE69eV(jw{#{TD03vFD7a;TY^#>Fc7TpWHABxh8)SM!y*pL1=q< z=hI(_^~+CE`iy(DOG(-Ce=Hd;Pt-e@@>!~-As2V?(0m4AE(SI!djUw*sbFyMQHMUB zOT_O<{+B@pHDHmeu_DD!CnJbSK^Z65)>+}21K)MM?|f|eWnCXC`1v`ur6}LP{=*^m z2#i^C2SbiK{>o$-EMS&Y;?)3PWFi^Bfy}!0h`s#dLFg-hhOan9g(_R(lug-EE zJ81+5S2Pew05h4oc8Ia9cXes(sY2SV4L)lATSAtUgXjvw7a2Es$8uh 
z=T%=+%>WlbG0cz*8!iY;@8Fd$7}jqld>DEwaY(= zF$##hQscFC_yw5WP(k>BaMJAAMSI4d*xwVelHy0SCuIGnFu#c3+f6i#m@>Nl9 z4bg14m=beMTDL`;y<59qlrz2}tRt%;FvXG8Kq5@+agH9W@zD2%FrIjtHc+$Z2Vk;H z)~USivAx>aro6z;?&8C^LHCD-pV)w5%VKuMwKGs2^(Z@SI8;QhPkRj6%RoM$qiUn) zSjrZeLkp1x9cB-=)yAQxDjUk#x$cqcu`$fjK6-(lS@UU1ZH=+p-9oq<%svmo??3(E za-!6;R4LWvE&keG_!*wNi<(#>)**;%gJ-!FMPi4wqvMNcbHOfk3XAT z)~)RP+RDmVTWYUY;6YK1j`E?eZG4^)Z**<14d-@{y2{e!Iq0s)=?5Vj0iLz{cTW6Y zh;A*vULi|$v&nRob^j$H{2X8xZV>~$+7%!EQ;&n>=Jz!0mW*|XfkK%=P2QZ4NzUR)_far}siXbt~} z+^koEG7&pfh(NkvTjj3_uiXI@{?onQhkx`v_`M&6#xT4-NNB_BX?sKuA$zs~E!+NG z4UaIG=%(AcXMg!TF|-kuH5fTsb;z(rR@4Yq&Cx?P4lAmUBQ*!AKSPX`rO-aC2&A3r zfaRXHzNj(rX(1O>)hN)#v&glICG#-506Trt7RlQ?HdM*l{|RSxS6sShRJ=DW?j52A z3d!O+aen5w`WS4fh}fHcM7e*JG}$%W`SOt0YU1Kl z^COQRWOZMh*f7v#g9Q-;R-Lob$(@oJmlp#aV<~=<5QV>24OfvX?GJfnT{g;=nz7uXw!42zuYTY zNKIOY5wbPYvbFZXyQY~!YMffgN>NwT8bbU)eU+FB@uYa%#yQMRvA4e}=M;_yd;Gx& zUREz(b(H54G}WJK#Wxb>O|8?+i9CpS-@ zh3s(~%xDi4i4WuS!wR7D&>wQkMPdz!X(T4g#Bngm7F^+1l$S868T~4wyjlZ|S4MfB z2@OkFlgo<(`;t-vAH%9PkZZ?W&M)7)dqg0Gn4v=n$j9QAe1>~P$UGxomQsq#ta0h$ z>g)Li-T^{B7m^erl`=n7QF#|j0MuHs?psc1w4uM&zb3qWkuudPd{hY$sol@NK&)&r zsZiJ)Q~8arO9o$fwA-{W>S8l#i!Gg?zMIf`!Z;TL_mTa4aewYHyTj(!@8`|Nh_ zqhG3%VFGv$pU{y%dPGWjg5^NtS!UFlQIQ*irCbn+$a>jskqG-k+3KMi}zVQN~pk0~RC~ zGHho^MgB@prK5I8#07|t$hAj(;NW~5m0^Ms_g)!gI4<(SL6`7psvkSrAt%JkG_vzK z_s?`1GvEEPZXU^sU8|-0Wj#ejxvvg=EtZ*-5Y=B1<`rfuZtKp7vymZ`OZe7}=h zqKuI6Z-%GWT9MxRgzCxp!$-^P(4e_sO}^!vbOf+&EjjF-^V!$~Y+~ zVa21V-Mfw(vtOT$56e{-E&1eTqOEMb>sxQ8Ftc?z)STp=Q`8!}(iUR=(C^QIn;D6E z@U&trZt84(&p5LuHm(}v9YD<*IQO@E*b1E?9vNi0EYIIZh*POK_bb(A<*9?L{4-#e zRS8uHCI#fjeXkulE8qFs-ah>#?5H)45zf9^Q&&m1x;2r>vP+)Lnkkp`O5>rI>nT&- z?+JyWj5wiJ{P0$}y`30`6^CE+n3DZt-gx?J6o#6%x8%@Yz6l5r!`EjJ@G0J2} zzRp1Z043X719MWh7OUhX*In8$jgq;}j(5Fn;8%R~6^PM-<6lbr3GiOv6KXjUb`aN`H*t~l_)uwhS|()U^KLqz z%;y}%Qaj{&wiH(B2tm$eOl4>sF5q|$>le9i6-ZC2+0|?xOQ(EfK*PtH9{gyDsu)fh zHj=1z?m3Hwd>g4d92$FRn-Jma7SYq<^}eU0(7*rfH+}puqQPs|+0O->4O`N3?I4{! 
zL3b;pf9N}lyhA6Yi>8rZJa7*F8K?bb?CwBI%lcMzow)gl)tsM2K_V=A)9tKKns#Mj zuQDT*4~{oE8&a||K5`yc3lZHANv=5)OjaDA;8Po&2qr3t8imNNbfAZ0$Y0=5ti82c z$^)mb8DXrLxJnp*;$f=V5k(9qiZpqw$-35(*Hc|nwu2Dg^h{WB*FPw#t;>bedN%bA z=eySMoOzqC{D4w_Gp<&6$v4dyE^(&82!XEE=DJGy1iL&|CtgPx-B@MAT*x?b6ZjQS zx&fy0r?yS#p8%am&tLflyF|q|gO}TDD5E-Iq9-H{b+P@&gX$9_5#7&<`n27%>xPPu z8#~Cz@BEe$#4w*3yg1@yn%vou*$WpJGeGv zs=j68#t!TAt!44>8<5naw<4gKpMtU*b2lo_D-4dskE=2ack-MsP7`g1ow$1S?S_Tm zyi#E%1KU*3u60~QG-36>XhV-zSbOQs&R72(>kwL>Sb=5^wNs9MTmSHBcO3f?KmcLNG2&h`-nkb$hT)T({lo zev`IQOL$A8uwDM;T-l+vIHRH*FchmM6^~e5}f(oUl zHdthQ8H3D9$m?r`?PG6y$dhwx&SzsaTQj^AtL#h$4bmc@qexT3m&H>>z2jY~Z_RGy z=t4ki0<`y>`fAtot0pIm>SD(xJ*Mx>T5Xw&q5M}bii|~kvZ|UFjo?Nhm&0rsM6i3} z2^JlcaSRciJ} zh>h>8o=VnhRcVoza~2(y8a$uj*8+{QP+vHu*vmqniC@C}Aq-Z6=4iOItmVtJkAgfh zt1Uz}W3zZgkd2R+3)V#{&gZqrHKIO|u&SP)g|b=$KoGj13^~Qj#YxhRoEj~rX1-WF z*B2#Lt-2Hvg3WPAyQjJEm<-^g9HeZD(mBIfp%W_9m;DJvVmhxxrzLc{^ig4zr040l zj_N7`m0E$~sgjl?#H(f8BA8K#uNCAWumZ?({;jqOnKPOdy`5nBR^Q3)b?B{9U9=UU z&H&O4EfhY;UC6fNkW(Qy=C0rzKxps;_ml@O)aFTXSQX+0 zLhma)+4cMZO`!AZo|5xM$)#8Rxj2cH=E%+dYs#WGBhx4d^OzvMjYcFrPkYmoq23dX z9y6VWZmEMxUN*s7Ue}bJ${bYSxu3eZ?QSjVzSmz4@+wnpwckoFF6zkRV2~$+NsX?KvspNDb0=B1wbmRh z6wyVcF=j3!k`4RObnNjYHb;pKQtCIr`lD1kwQ|r}$1R!ro5i*o2|H7IDmQGH@#({U zo7QMr4gI9dh)xkAZqwBTD6QwQ1+h1sJ)8z=w82STn^d!=J>E9n3~S%~a4sQrK;ijN zEEN;w)8#`p^b=j)t#t^*-wXPW^&y6ROINvBaHP}VWx5Z@N}_bMexq*BOUR^YQVlNO z+>J{&x$}VdLf=TL%*b^GGZ#9se@17>Z;O60P?T5;8Y7m?@GnGoA$F)m#N;erq;Iea zb%71^ybN)8`Sg%%K8p_p48Wt)?Ip{?YFh6HlI@{<3eXY>zlfSd9jD*~5x zDM%A(=sTm|tGCuV^8=A#GxCHjdbHW1W*EfTmv6er+1PzX9!IVrvaxrILY!4X+W4tO zM;?bO_ece~Be}VSknhTxKK!T&MJrW|Sj-y_HY)s<1G!!>7qnQwxEFqbvRBdibM_Yu;Ej&CiqVwHox52o9-yQR{y-jfBTy3b;V_ei944Wor;z=I z*AE-w9Qe=%olu}U@hsjm8i*9tfP)Z_B)?Z9>}Y(kWBAMeeh2(-X8s82U$}-At6kpE zfUc5wG>|3ZUfD;NWxDs39TlMohMiARVUxfnbKoJY0U$fg*s@=E+JA(kl-HTfQpJO_ zUA$=n!eHkSYvzI?Jz!vynaju}Yb|t#}#Al2dn#_OUird)1j$9p?_C61Au?aXx*T0%B?mFb={i!ZWTQ+Kbn2 z_x^IB1?zQO)6u-sa+3nRREiCNl*%ip-W6%>_5Y2In*iwe*^=MgzgXuTp6>&Ftfe)+ 
z!}qr;?JqfN%d~D_V(*ZZ@J}$$j+n>l4{zA9dg8(PP5jTqWhb)b6Se;uEPsze8 zjvs>*SDx8l^Z!ioDO5!H$J}v@?gN#qTaoaSxmk=~y!-qNShIL9HR5lLPnna{=8=^I zYYsPujKsqXb^!4n77`-QwITS82+-33Z1JiX`4?_&!CGO+eX6T5dKzmxeIwwK^b@YX zVlz&xFf7{o6A$bc=1z$V5ZE-s?DXa@g_+m_L2=rZsu^acx##Ax82%lkCEF~Xm))^> zseEJ%?+f4EddTx@ktcw(4;V~w{!r)#^2JID=m7BD&SN1y?)I9#w|Grml(fmd-tV&} zM9x*$pS6{79zP(|tCC9qvsqQA1bRi3H%tfaIYk}4NV}9Ujl@Y@l=5YYMpRud`XTar z+&fG_G5G$+fVY3eH^lZrHkU`F^YBf!rLw?qk|Sxm{6xd_cAG29_QmeSxuIwl03yO5 zHXMj_s$>U2uGS91a$PZ1q;ZPVKxOhl+p*iv#$hWG+^vHe8qZnrP}I*v=`}(4D@xxu zp?8PF|!Ypam9o-X)J-jKN&C`BjP>>z4RX7GVgg_?NyEc^YKh`nV(+Y0Gr+ z8$-?L0;UH7&UMcERdUKylUH&fd1;=YZ0Wv`)ZvXpUK8G(^8!pdOv$iK7V zYlU3ZSyI@n|4riv{b$xYf!+Hf&ME5e97c8jv)<16n4VH2;(>?7g4VkafA66r z9n_qHKBLRFiRtfB{_Ka}R?hs&%3XUV`M;k2W!3+mIzJ+R!}oy&fE(Y|QM~=j57JLp z6evqe3=;p{X+M5jw7Z%LY)=xGo6SOw`W)TF;XvA%-YgwSELryoVa`w}|86T%MAi(J8lw3gLHI@MzsK3L7p?IL6IuclQR=JD;gbv8W_) zN5iY8#^NU@Cf4mfC^FAij@!oSn0P;T8Hb9F+9Eg3TC4s4U^(;}w7{<1sV>*~XT!A7 z|C|k>>zIp_;dNT`Zr6aaDQjmH@V)RVo!PG2?TK}mYpH?8wCB`aUYv3?F5Xg? 
zWI!7_g(F$D4WPpAN3$W|RM>DdK=hDQ>w(iKII;c5w=nQ%8CU?esx|65?JWbBtom0_ z9gM)XB6o3l`w#IeSd){Jywzn<)~z(I&O8%^EE6QYM-LkoQgAdW4DJ2j-4p8%N_4rT zyF3cH=h((jHtlnI{HXqL%CoHHE<{mPHQa!L^)_L-uP@$28#}3qkjmJWM4vDBGe!4{ zhxLb2qh7n+3-8S$9M*co*SJSt1QO3DPoL$P3ztqBGG^uYvk|W|j^o+HE;+9uyJd<)aEjZHP~P>r zi<31YO6!F&k8#^{?}P2b-(}`s2E%^`2rm*^Q&Im!)1VK2L8`+WX#Ji?*-sCbw|p-> z(Zoj-4M#yp-$@uc6n1{T9qG%xzrSyr3cFT+vRbg9({K|1j`F@-VQx;)i-A1V*z?2b zY3QwAMC<5AUzSJpCNzK5yQ+ysA8L>MNQ68{(tUaflu5l;r$4FY(1M-M zE_KaL2Ul)UQIBt$5yf!-0rm7zu&(_Ej74T8Cl;UGo5l98iCVZrsR5#@JbmVCh5;7e z;|GsZ>#TjJuMU!wm(ee?sEZZah^)Pt-wD|?XcIyxS3z_ z$P>W-2d_*OxMRWF<;9%Rhuu3mhO=MI?E;t5eT{kRk6s=Fn$C?tARH$gTH7;2XgQHv z^~kDF$-VCndEZnP&1650JV1MjZlVs2-S_$)e+>v$SMzVp9uXy|M793X5?rn**FP-O`zCWd ze@k>&!^|vOBBBI5o~%zD);3j9i9ob7MN0mtbs0^5LJpiQi3E*In?Oci&d#ezY|QH` zj)o|$eWp5_QABs`q&z%$3#!2>^el#<5Ds{#Z(m!Beb3i?_^o)>3tmCdYh99)f>_B#E&(f{#IP6e3bOIaX!mmO zqfEO~uhde{+WY%v6K>fDb&)a`y8Q+w9yof{Rz|hG(x*0d@uyaB!L8agUeN>dpMasu zz^y7!`tr5e#w>pfVH*pgvHyZu*1=g&tw3{-)Ty898T)9Ps_;XP2<$2;2#)rvwCI5J z*=5GErP#}9HpuR#9|CLT&8H(eI~9k#)I=^a?%ruw)k(z?$_wAkQH6x|Uk*5M zPtL8Z0rm`9rGBo-OyzR90=_-xGmc_oPBcWJPIcbxggvz==}9fWh=Wn^q4r_~x0aU` zM->{})bxrQoi&js$V0EkG%&6HB7cCE@6hbIh&)L&(rFD|#R}V@*($npYI#RRg?S zcCm5kfPUM_m>#5Sy4~EwZmMUzEi=Q2q>xU0*C4}}w65gsYq^F34Xvg@`glKX3QtZ+ z;8GB#02}pahSPOC!0hx##|3%a(T_0Rl**s|k)8gF2N>BzOU{J#J`_)H)7n)2!T{(5;YD=gUK(gC@hy5+@|JBy#*sOeT6c;(Dfe}ngQ#NDNj}5MjRvdZ!lnbDTCb8nTKDt)*_VR<*s^vJ~d3ADy z`_0oA2clcNfCw_0u}pn@eJ^&?*@{sI`iIC9xSe;WcWH*v!AV{84ed#N6$1b!E)VQX z5py!+pY^^;W^viLS zOTTmka1oZ+T&I4!Q0}W^AAM0U=qJ#3U~EU~-n|qHbyZb&>8g7`<+-0`K0Y0}eNCB= z>(qezO~v8s#&7jrH+=lT{y(z0^t(OA^}fG?bE3ZKKN&-qVpi}Fl5^Yh@WRUO+1)~a zeG)>fxcYATLVOFChh6Hrk9Il87hS5xxoAu`4JcP(2-Tw0?exa7o$X(N0mvVFhk(7w z#kY3x10+V~xLy53t508Y#ZAV0c>HY%*muiwT4lr2r?wl=(;x4i5{< zdmMSp*8@M{To)(T+j%G0qqyp3s#N^wiN{6(9Ae>dUIRqOvpCMU7HFe!4+Xa{M8aSx z!?`h)$XTFw-%Z21Vb>`Na^o;1!jd|cC+y@Fm&Wej&a8}bP~@SeSPd_SSy1b_=*@L> zcR}`{(+aUCt3*1IarAt6u03sGG-R?*>4p*ENC=(J2u`oKXDyPBNQ3)Jg>AlgfoK1) 
zK^PxlW*~e8z7zoi6JMN{K$`Ri^nrrCTyrp9>C7nB#$Im23pQ`hO(u;{tePS-nu31f zZdenss5#tYe`n_ElEKV=0>zrL0R;#rMtT1_Ul&#)O`s9 zwpVUS{R-pND)c+QfT&-mP>Z-Hrs*=_@K|g<>Y?T;WOlAwbeek%W}vNa#gZfdetXYt z#{Vj<88BhD=)GQk`20;`441;^6^;Y6*g@N+?G!2MOi5~&c+`UkRY>tp7%aYqSHh;yAT5mv8 zn7I+xnZ;@2mUUxzUzN<2kdPhZO=Gp!){6HzWXU5jt9_?LCl7E3fr`S%mNK0Y5R`j8c? zTRm;EmFz{+X2k7RfqutRHPP?wU^;zdKz|=dT3kuRnQ{p^oD1(?W)u!RVsO_dDDw}U?JcM>C z;M4Q-`|tVnO9KaaqBjZe(PU41k5Y3d^}diL2F^1B7Edb%U;ubzQyp#OnhFWZ5VUy< zD+WptgcQ8aM)`F`sp6F>g+Mys^NBLg<}}*_$Ze1$%)=-Z%QKh}vd}Xs=pHxVZfO1a z8yco4?##7Dy24Wn$J1c5<GDpWNt#-4JwJ`)YltVj#+bwUZkAQf&u7t=lT`jl?V+XYgf5N_(LnB znn;Pw2YRrf=T6#n`n48|lK8Y&z=IsVE{MuRXdFtF`T&*HlJTzLv=BH#FrV`H=|J_{ z3N@6TbrzGOZEGMvgq!~hkd;}jek9^an748N^=tvD`QadUl2*%-La%>7c?^F{oVY2U zCTHKf%akC~j>_%lg<&6IT$WdW@J;;PQwW?DE{yPQkNP7;^~zsB^zxl$D+6|@?;4E{ zQ?k;)jp@V_hf}cU`+b1Njaa5r4}kzRK%M^i6@2+w_k{)~_zs=u^M9cdO204t5LdF5 zm7b%nwk2u4ZcoFQ?fcq)l0bLt7%mqHJK4zPSEStdM_9?lMr+e*bi+~&Pp8gb;(7)8 zIeu}Ly!i)mqV&OnR?z|HE2i7rX5e_ne24-8Q&WX||BR=>=wEYJ{$hWnT9^DdV9G+4 z{@s_O-r3+ zo+wE-%~V2Gm2%euf!jE?sqv(Ih1}0Ilt5y)iwY3K@pqF2Ctsbkw`$mucO4N~d8aDP$4__o^VaFX1&V>h(<^>FgoWI#{CU61>0FJ;svv4Dd!TnJlV^G1&3mKY8(`z|pnO4&`u zR|Mft9#4xaa}y~0Oi@rxQP0|h*qFQ=`AR`G`uYm_9L3&oPtA|xd&J3*(MA5mqbGWU zLgh!Uwi55kDUQ?m(RRP&2EV?Rhwhnt1_5?muY>jw|5m4)x;745Jm(E1Oll{AOsIH| zsXz*YyHCMNMgYSCGciZtBnr>t9u6V<21(L+88*dQ{RV8Ia^Kk1c6=DZv5&UYMEVYV z5MXbEVh88>;oz<`oSH_&p+oqUZGcpORinFDaN!2~xLw^-1J$WGM+Uv+kyAUTs(QAI zxHS*k@v)b{{@k3l4Z3&?QNH4+RwYQd!C1SJ8}Trs1Z!&RbN3+BOudX=!`7d0Yb7Bq zr_KFTSTWmWzsRI-(lY~vA|%tWAYg04y*|aKM=G-J6xcfp6rZ$0KzY;dTb+bkgv48f zM4r%6-xWeQeBeF4~=2 zZdRs|x{B^CakxntPF?Z{vv5KVy!RU9%o!CXx9v)0G!GW1Pw8jiTU(YMuVgX$(kUaE zG`z7^GG#~5VS8vi3CH@jPxZU%;WycXKny099s9;PhM2jqHDkw3u?wp*@>s6RqgKYy z*Lz-j(aqCt56KA06?X#=o_iFOgX(@~<#7h9QS=6q_>-ljqeX)c5Rnykc6AQR5sP(1fk#0oU} z>C&b@#os(G@CG}|Z1Ugn(`W)Ty_Mx*Xc!0Ytt6{(I9~UM743-krV4vz$o$ zPrO)<$SFAi=@YQaNX}qf61h*7$EK!|`R+dt?M~q+6?0-PeHBf}eNK$H1D3zmaT0oq zMcln?&`QIKhchw7DPnqIoJ9Pb%>jTII(VWkz-skd`TZt#R!VxCf*ltjwHR==kQ1t+ 
zskvSKr#`M*Q`eoiGw!|O>X5~WTk(BQaFZSPQg@O1h1M2F?jk-B)PE2C)@vFY4;D~- zv$CGFSfX*KCLAaNaA*{wlN`O4c^`w4rdA#1Q(fN%#ZEdcDYqKADtTRaq2Us1Lz%MR zz~DI5(=EQ}aFLiJu1h7Wa3!veLPtqIP&-aQablo%IAJ{>wT&K2$cqsC+e$MN5@uzM zZTh2DhH#telbP1HDFr-fy~DE*0^tL59al|q??I+)^r!L&5_g`k#QnB+tw{56)m3oV z{SWdBrF92taJ!PdkNV@1Xz<2KIxmy{ zR~bI}ct=}-?JgFtEU+?0aiZGCMmH&4E9GO+H&yP>lf!Jxiy1byZsf z9y?qn(VL0JF%br~(IdOPVI#=HJ7M8c90%%1_i^30i>&njtG(}xYieuyJ)l@X1qDQk z6cq?aQL6L^2SvJ}cN78XMIrPmqEaQHcj-0s0HLTTNS6+QP^1Zg&CO?t^6yl8a_Nwo&aYQlYemT=M_p2 zj9A@YO2?csN{ej|0#{S%gCz*7lP=qBrG3oWIFTCldo>sM4mbUrPLDSdKKQrIWm=== za**FHX}qcu8UC`sB*fbAEX%&eueNL>k{dM_J z-rFd7Q>ytyi4*J4&9PilaVO@mesX2(^3}d`^*xW={CeDZ8tcUqAWg z+Vs)g>W@_PT)EAR3t{2!QY0xAGi*=3{DxKKIniN`eSok-Mo5eS)+Dvos2795@QlbG zaV0A~>AJGyx1lI5VA&CjZba>WRmgEcG-kVhdIc|?ta_*V_S*$=%Zq3HdUyv371MUk znQ{zq6t7dS-BJS1e~f&#$vYy&gBkrq#i|g4(Roz|841U3LkSNEtA_H;Lt-9kPFxs` zF{J7rHmFoL_2tGHvWO|#&E>UKchvSBg~G$eV_dNosTJK|LrsCNm*JIF6!RC*L9W#w zk$??NizftZ&Y20)+cA0(M~o~9-6Dyd7K~4Qw?-A)#dhjF)0^Ve|?CRL{vToce=#+ny457B#+QuWOB9 zHD$1;y0_}-ZI5A}Z5!-7PyPH0A$_|x0OxTY0!2ek!;H%tc1-0{9j=N^p`h{2WrhKZyS#lRPLrQ=ezVVKH0^ zqs(_)?O$$seifu6S%1DB1*T}JE1JmrIZ zEJzC}Gf&fl)r=m^Y8rxVBF-H-PBK9jXf=ucKywNb$^68lFF+%bwIABJiaG7`LIOOiwX3dIG7XjKn z`p!g{Q-*bomUI4z^%?DKu*8)0w#5B(XqCv&$Y=_q=&V)tmhD;(YiYrIqdYWieC|Q5 zSp7IParfN#;eP1Hu#v`@Q&^Lx3AAHFAG)-$X8J<%?s#vK`xFLUTGuKGplZ`%T5@|I z%GB_*q%!)vRHmB!#e?xN{G|K6538O4(XK;qeMQ;dY>BsbvEsE)GhZ{yeX#ix^4@)t zox>3c5iu9LC6-@}Sr|qqFAk5@4^+r4#vfR}g&kFXu^|kOi!nzEFzkQXzqz`^;I-0q zJ#VbXsANBLW-}c*rr|f>=#4`^ZEMUMOYA} zDjr7{z17O`OPvTPODo7;9yIXntIy+xE*>$r?sZBn;$EIfPJc6Q5e zC0^HR+^dtpI#kaXPjDyfsyT>t_WDMRi0JD@jdlCu%G8@nT_=Xe4>8H(A1%B-NDAAo zx{VKwKU_DG zE4+SDxT|FX&OqqfyP^DXhh>H?yJ*ym;;Kilf-qh@M6U!W7_t~VUq%k0hf=i ziT(Xo;2l#d8TLK!Sm z!ZT0LsHz8=h~YC_O;fuaOUnmJ*?hqT(aVkLMvlBHsLky}4*MRtsx~dt!5F#u^dvn zA7gisdEP7A)T&fuIjqb!-t_DOq?#N@1!D96?C$;n zdj>=mw82h3%l>t_`D1fGV|C)F|RA|`NmS0q?J@&4WTzKC{q^GkF6suqLw zIMWGbnLJH=B7^rz!J^kuf(&b|J6fw3KbnkmDuYp{Gntx#GQtV`HI3fQO?|R1=3?uE 
zcK*A07f^Hc!!&ffx57J}38;;ztGnG;BL#gF!H(JC!{x;F9NWHH<7N7|5$~LNF~_D6 zY{er)8es!5En+cod#+SMJpR*v?d|VN4vVs-D1OaPhPu`b`YMu z2N>1XTsCNmXajL)_~^Suoxo(d{IhO`^)uvW!b8EEM-?pT$&bP9YfiH+q7wI#DAZAz z12|vT_1@)7bP*1*++U)(>1I^CAOFdex_#2kg^*D1!FH4ry9+(apdjT?Brp(~T6e9%zBWKe*P_+J2m4YN zHA8)MHw`H^7P_1GxKZ8n^=T*4B7I`v?mz``1?_weBPf=^QdqJq>9=PvR?`}=c@dFu z*r8##%DBda$eC=ZF&MB_K<)s_^R@6RTOH-49WvrHzb_yfuLIT~4sLA5-f-?PS#h>e~NebO>Qu7TL$pn#`O z$c%rMItgcmh=uEc6VF{#!MJU8NTF5>Q57cyW4)&A=;t2*sgDBiCTZUFeBz*RnjD)fm#hbGr=!A$7Pl0oVC;yKA?F{L!sBYG(uLEfh`OJ1QR!KNq z8Is?=td~Xdjt?Z?Z6c&d-g5fvTB*Yrucxx@58w&896Y*i{NyWeI;Af<9hC8f>KkRd(%Gfog~ z*QKr#GKn9--MW{HYqMi+nQGZhcuZ*a%m@-{s+ny5O!p-6x#trDM5^1fk&FNcN6xOe z06l#EkOO{b2_(<6re_8T*E=msI>nzx_v2 z`)Q=}-LG{b8+&D}OJpxt5)gj#!L*hNy`z&})ZAF)7R>+(3YFW{_qpSZ9fEbAW-(VI z;Owd^t*I$NI;jTD=EUjMI-sIwmP@lIpdw}M^X#clfi$4E?={Kh7qIzM&>BP6mc-8? zV5WqbY&)qIXR#!ZwMb{M?AZ>U_)^ND&@kx)NoQp1t;~p4&jt^dqN8?&duqk5eJ%qn zcS3-@fDMJF`%P(kqe4BCloqzvifeNGd;R>b*iq!v^A{0ZD+uNFUTj;jv4L5m0Y6HA zOR$1YGeRBkBAGZ|HeY`?+ z?W)+Y8|i5H+fWoHN-pi{s{;Soq}F>yHc!i%A}?UET|KU>X`32{J5-lP2V1C-U2#T+ z*eL&$qO`%SB#s}f050_aeCO-R#V_Ygl-MLV=U0~)R}Ei z!>WGBk1Ia~Z#L<-F>9#je33_~r?XSA!LGM&Kb0ywXUHw*SCj2+|LDo+ zJ!!pRT3#m>o-BaH1iQ(X`9RIZnZ??4yZR)4sNh0@Qi=aWRY0u)`AY$9; z`g?<>ZhCt8sFt{$NA_y_H;c7n7wcJB7`%xtkFSNmoF+3l_MQ&AMU42x_0~3hwKmoJ z%j*EDJ9gxcy-=Aem}2DAdFXLKjBd}F%#rTl6+k>@pnxARJ}ZW9IBo~HCRs1dN`@hs zk`6iY2svkHVVT@RFo@;dj0%bvl16-m)I-v}dqMaIuEoX34DVJvBO{%q?#%stolZha$yib-`<=@h(B{-|}w!5yDv z!qG4|C-qFc=f&z92ZWWfe?RxP{du!P>Sn(WqBCFHcGEA_zr&6)4OAp-=}~02(hl9p zq;#zA@>DG8{WQs9b)A}ePY#X4no6KPp#qAI3PGu;2d$ACue}x=h1Pv(kb(=kvl_T+ zc8fLiZQ=YN$RiJz>W^bt;a)K*qhIC(DJ)8&J4j)EJmkHj>W;w4@$t!yjnH$GH@OxJ zUA0g4^G5qIiyr^YCCEzRr>2uZpE+O4kA zAL-8--UyY`kB2Vee3x=Q+_f}G9V$VlSpq4|B=_-dAz9DJkL`1#Ff`8)Ou|r?CP6b0 zLd0Fxc=B#a&_b$Kk)5BsWU|iOVOBY^Dzz|eVhy`07Uu$jwP~NJ@o__Rr^#-XHnbnz z7||KZpS~qPy?+IszHu8glB{DlCFf@4 zYiZp_rGLz8&jPTmx8ym8w~Pz%DX+JuM}bGWSKu6>OJ7bQdq%k3<21bfnhCXZLb8hy z-a@J1Hb~S!eWZojs28STxb}vAcw~sa-gBfU$L1^ 
z*{_o45LseLeh&g4NR|qhBp8#+u--{s!8EWPKoKUMb4j$xs&iw!B7_O3p6AFMs}ye9 zYP=71PHtZKrJ0MJ(UoA=UK6I4S=zCCktU{rS2S%8AB4m#qcOk`%!hV2Xy@`{frVAW z4%t+_ibu^ZY%4$zBM!!Id>tmzjDW-#5?mmAZn4>H-SjOEtB$3)q7evb>IJQYcmCbj ztNJ&%;8_nf!w?3w?;C2xjogrH$5&SytSPg>O)WGI^x@UQ)=90|+9Y<0loGjmN%8Bo zyYW;KZlQqhg>XcZP~%Uh7L|9Jk}2t!0>N)61`Uv%D6wWjOux>R7N7^tn2f8oM%DRS%+-Ty;S8^ zs_dH|loWL87;m?zs`%Y1XuC%^0YUuW9Bfn0HQx{4ChZ)K#1}GKdwqV&2%MI**-5_d zVy3LQcr)Oheo6&xbRxC|>CNU}RVOGp0X`_qE8H1E$#*7OD`Rji{-ArOt5ul`dd?(RIx^yd;U)eeA{T$sF(yii3797dD z(fYevJN`mrJRe$A_&YTlOe?%n{;$Bdl<-a8d$hBvgF&}C10CR7L+l$ zy|hS)U01_)M^CcRNpq2+FUp_0%8O2oGo5G;lgpg5xSoC zQ0XPWo;C*sD*L5BPysVK&}-X19C}im;`4mBX2S;M7#6&Gf^E{}>te$M>_>{OkmB2# zy&fj^2vd^Kx>;9zg|p^vMiqJEkq%rXZKYb+|DKY8p~aG3g`qdskoqol$8y{<)-1p)^B;bw6)NQ}N%0{sd^e~MPbIp= zG6eIR_N#Hu&!M^sWV4>kKRL%eRQ)3cQMYXz_SX)72Zi2oBJa>P^J5AHB=IRj$nQ+4 zELjiGz_o*4^X!^%ERAL>Hrt>#tAO=-cDT^QHO(>db`oOWSa)nBW>|wYaC8VYz|Gai zW`@{`hRl*~bIs4ywc5zQ8Xm^MISMLTU)><#I-xlsA|Oz`27q7M8V&zM8B7X5_5>r|k3xBjXAzp#DW+C)AZATh+Gg zp9YQZM=1t}P>ALjAi}8kf$oib^R8dD##;v~yB!UW)G%N8bg#ZQ2n+$=ed79hD8`HL z6zrKyhBn?o=K*gtAj6LKE9%30$kF#u16QZ(M%)=Ji3~=6$^p+^e_so4`D;VupZX_G z8coieCOU|_Cz#PmR4j+J+0sU=nD0a8sABAyPnwXxEMBf52O~3%6m6&@HOL|!R~{*m zsG$r_6#NJn|4&{C(C_ay)f{x+YBS|Ftm4E!SBGiVS%hA^Wr`PyR2##v>F@5ryjIGs zz17BT9?k>RSNr6JF@u+iIOvsm1B$WDI@Q_o0C_05^YAEqUY2ZLf7^rqxI*iFG*G9k z(7z5eb^fvGoV1z?`O~+b6LyV4VhVG$!3AZIA-I>oL>%yPfTu!~pQdl?s_|xMgBaIt zz8|b7>1_f{5azO1j;p`H?36T+duM@D%THPiBEUL2I7pcyrd?150qJlyn#?W3Q_^XE zzrqXhRs8Ve+|BCJzblMCJjuS65q@dn7oiG>gyD(-(G`5fD|A0uJ%K^wl2N@Sa!c1kg&OE@bRyx5l504kFXhY~?IxV&laE4J8dc$0UAB;+A9}&N}`DgVh2b3+4i3k5|5f&Xg|c#m~Wj zAK5@}Xdw!Qz<+GxhpZk1MtTcYMiQE6S#A9mMTpPkAs((xX0{V{5dH;~wPthqQzJEdu`h4_WxY51; zTTja2FX*9;0QkG#lhXdtc5BDjuIC>y0m-Mo!Lu4Q#KVNI=yv_PjDjaPECHwed(3=C zpj>0QiI^huYns`b8HVJ*tBWAH>&($y|8%(p2A}=WJhf+<<+w=lih4#8xv~Kb|FiicdY9j!1s!av5I!Tsv7}5PoZeWU=C+?agdX<`Z0s<3n1IB6Egz8rG2Z9 ze27J}BQ4tXUGoVE+2hMiK>6e>w!82629GG{G~Fi%sbwa!>TES#DIqOQRuvA>Y>+f> z^I-nbTK9@nuEn2J|DQ6R2L9iN`H#G_Apy}svGF1Ijzz@@RA2H*@JJokS<--F|KhML 
zxG_fkdlu2AfYi55nGKIy(XvkiM)$9AVMahFSXVQ%o@0S0(2;ZVIj{u|r z8A;RNj(rt9J5T~i#_*c!_{teNL14lS^hU>5Yyjg{ZPCd%|NBmWtEJae@|;h_->3TEd7CdX4pBl1sg-5;yU*#4muEOd}2|M+lzw5Zm>iSqk@=JgP0gBzI? z$#8trGulCbP&kb_(XW0u49yFHO|C0&Uva0!2ttN1>Jole7vQo!0Dsh>){Rxr;VS=ca;uGBM?KU^g_75d(w?16h@E~#$g z0{>Z1(Wcbf-}?lRX*B>heo!i5{i89Ub}%^8B_4?>6MF%dBsut`QUv-0-jB0vTZ%+_ z5gQTRFWy7^zYHmt_($jgWYI+bkRPPBUlO}zL$Rr#td^A3{!IUu%83L%xy5**J^1qQ zMI~Kyi0WN$gh@ZGVY$bH8f5ziMEfW~y12`ursPe>#u?tTAg%QXFzs(oKz6cKgIz9r z@a@v3j31HZCnwc>g?z(0F{u5-S<*vWb!sR+|HMNx)^>HZKh^`rH`F2%)OCO zyzX=*>0e9p3e*q0gwGI|3UlvHaQtP&O0WQ8ZLV`nVY`S)Y&$0I3dELWK%rqAADKLC zXp$(eOy1Fc)c9%a*cF{QcO7tKw$BQALhsJn{LqU)7y4Jg!TXwydIkF}Q}U$0xg#I$ z*BvnxPo7ZrEl8?>(|!k3W`utJ&YuMp(1Ql&jUqVnRaZH)9s$?EI%f~%5GgAg58}^q z7cE!hEk9uZpi3ifu-^fSskpp!n}d_gvQR)g;@CyeBq#;S0}&MND#}me$@rb0O*6o5 zl!xm*j;MoDN7Uw**5#Aa#abL0${m%CxQZiaN6$JYq5M`Sw4gN`$nbu)%)R($PBxc7 zblMQL#N3;I8~s?;GR-Tws05lk$xY!MlHsHCrW3LxtL@Ud`1cH&{C9kjr5Y)b#+ zW4*QBhJhlGctJH6>54WY+BN(vCvf&Er9}KWqvkUO?u9+hq|Gg?h^CDCxsl> z-#5^%l=*Ke+KNCLW86lHjP+jLN)mD5F+fgQFWmR|xq3!bGH@nHIrWxKEDTriO#a7q zJtUuMC;_+_M%LqwbH1i+WuMYTXLV%ieB9C-8wy9n43Wc3*pcu=-D;pY{b`6+-rx&d zgT%r+>l)!PRmTJA?I8O&C)%CVixkd}ytR zxav3j#vBUZ`V0>F=3C|5m2y*y4oT(ciwj+`3)&*8f#-iHCJ=3w2b;mltw>@z^z-_y z?`~x91#~I=(WW!ue}f;*pXl9wiO4i%IMKQ4+mK7a9JMZsQ z3ISTjU>1hP;McZVU7>5bH28V8>QSe-=Pz?hxgEV0Skw#zM0GGMd>Mzo(;DX z8w#tY9jbu1{wQN0?ldetO`zoax`A-+cI%co2MPr7BzC+tP58M#ea z_WMNkLzNWHfU=Dbp07pg#J0!tWQX77%(R5O{1LEaxKhY^E9CKe_N&lR@l~g}@nmc3 z*I|oZ5*1=4xz@0i(Da2N*tq?jZFhV98?G(Pt(8vfphK*m`kM}NV$IIEF!PmFF005`Xw(by~DAwrNEmrJ^ zrNXq0?92`>(!&;{`0+<-8_~jP-hRn${tkq&@lY{u3hc^~-Bf(fU3YA^AgdR)_>=JK-yT~WKkI;6bJ|xX??a1r+vA(M!%ez?*?jx(c&lFI0iSs=zqh1r{91%T;heFX5ayI1{vjOuhGZQC0qM6Z|)7A(3 zY)3e&afi?kZA}MytGiflNqOc$kQ=vLD{}mCRv>O6m9)IZBFTYxnGtnFRF_%|GkIfdavpL^kR2O zArF0yT6z}JMKqK?*Z3X1Qnw^czwCI_5q{NQsnVgI7|;&u-5EtX*;bxT|6UVW+C;O$ zdRW8d-D{N0CtPMA{rrKW1=zFQ-}*#H!4almh~ICTFHDY(%yQ#j?NNbvDxc!9TaNoJ z^jFux(~BVrlrPNQ9r(kPiSn_+EZ(Ivun262t~N(E6U2ajHIU 
z$WG>EO_zWon?iEDSNc&zceO#N(nNddLGu`?;(U+Tb?Cd--Bb-Vdty;-w}+*X-^RuF zs!v$U>}~kHd+4zg`v`G_V!b1uX(xnsk1F%6yEiWY9lg7m@+^5|UPnh|PQw`+c3DFxlp7hos8A6$85xTv$ZZ z2cvT!9g6*eFuf2Lo<&E9b#iw>CCZntlb+wVW~4O2eZ@PeMa2SKkg-95Fr4T|`tGG; zYWk5{&OeA!Un1N*hXlu0Ls`vx0QjM?r|pB!Zq6i)WZ|0)qx3|HeDv)a738AH*kdGS z#HQP{pms}lCsVC3TAMR7?uw=#adYUAKVq`B2R}s<)H;P3XZ$ElQ7)N>^`A<^?v_rw zSDs&9Xl})kpgu1YjGGoxXvS~>85gTjy@t#6vB~vV(o$gQ z^t!AQ!GA5*!RV>^S<-8=`3-N81h1e^j+0qeLxHE>3x-gWKL;K5TbBY4zX~g!WwZNoe%DC@UEpcPFAd9KbB^I*=8k2-*dH)U@oAQ=LO&E?7 zR_t&E&U&4_m?L`D0Gx=pMd7#bk;i>FwVJ0<#c#>cS>YG;bV%YBXa2;nk&rSjAB5}*8sSuQr}wzbP3<5peA zpAtT$PAu!2O3lf_c(2PX-dz-%yM1X9;c!W?BX#`mCijTlZKN&VB&SbFQd#7-Q`Nbvr{PNbna9lRUc8@Ht?UmZA73v&))Qf?u1zH8nr1 zs$SWD0%f^IczG|3Y<@V|a^+@v0;@O`rfJ8xSXLWvuXD#%Vy02+*|^L)zErcIHY?Pn zP`1nLF>hnOLRtVNuAIu%Gm(tbGuhc&Bx_n9~y8^yL8y~9pzG%GYg z6k+|&*2n}AE-XrJ;C(1+nqNpayXit5GL)+@ISDViH}s6*@Xol&Q5ajg<9H*lQ(4#E zx7!l{XJ!;bOO_J~&q@V_2DqMM2#){Pru{gP0xvRnrE*$mVB$4g`je?$K6>h}tG*@U z^GWed>|X0UPnZqo>gZ$_S1!<^uC~eXh4XKdCe!Oz*97U(ZdgxKUMS}2dI&0b{mgwS z{m!E7+BaWYMwCYp^KPa!AGJvXVk5Colu?a%sqN`GjN8%DVSpxf{4n!+_}e`Wwn_-??$d8+nU~!NL|nVw z%K|x~8_Ji&;E+k!;t8b&-hs^3{_yi6_2HG9jqyQo1zNW9U-NJa0GY6GS9-1RGK%D6 zN?Z!#y*`v{Nm}9uJla^xsDm^-BCIvEp$8S!yyX_L+a8>xu=DgbL2DFQ?wORzYTvi|1WV5m_7ac|nR-ub zYu4_x&N`D>vMjoeIAB+gP#PQMJa(%#)bHjs^Mi!^H#t(1dv5CjRFLMU87>NHgc#y>$ZHUx);INN{xkPFXQjG_fi4ENFJ<+n+?m zamF$M99T^OH!4NEcCPp^cOcu|p{WwZu(t|z@iwsDh@=lodUPv(9qDnGg~l(_S@6p zl^rWf!Y#ogZ)8C-lB@Zl_{Je8z>;~dPG+*w!EyrZfCzwKvWHA0$8;J7tWm^u&zovP zakn!DRmaw3Ej{x`8^2WwtHCf&Ut{uNm>eClLglaWz+SIGeAAHkS0T_%4dXK2uF?|6 zWm^S6Wpc3J#?fkGZEVt_3_`4K#E!Dw(TGF4kt(zxZsm+g8-0N7L;5{xM+Yk5qGrps zmq9a*qt53@#;EMJ%YsWaVbu75{9GbgsK2(9}!I9n+oGj&XH<#FLCgqyWEiVdtJrMk0NfI z!>V7CzTcZkjynw444H4tlz)h1B5lM1APL`Oryf4!fs@F7MBL*O$__=P=;ewMXUI%G zI`g$7od@qpohyb<_xp$VA1$orzGO1GBWTg#^7lReUQ7oZ|;uEWdJmT@Ky@` zD>C?%;W2*!puB7*!_2L4pREST@%`A(Au;VOpaki$whV2sUQYmoETL7gfzT&FUFoGd z6g=Q#+=|2vnuo4QSbpsSn5;SIBFBNL`b_@kGPrfyzbV3;j5XIP9mI_sNH$S7G6rQ4 
z2bQR#;Wi^}%|c5PYBkY&IcSl=cLpB>8*)G6wqLs&`m|!Z#quj>scpo%zm{;S$7Gdh zRR-DV+Utnn=1s;$QTP(5+Z->bx-m#5B!}87-ZmthDeqZql#wScwy5O_+*Ru@&p}kK zhmMuXhk#8!HYcsccclyHHg(pNH0LjaDFv;Kd{+L;Fa6*4Cipe$y)7(ZqJ3U^`eU2- zUx&(ay|v#BeFhLA=+gH(95Yy?U0KxCTc54)y{}l2{6nknIwkE96pY>@k%_lfbA(<;(+bT)!pJU ztw+g)^t12UbWsBbtgXCrERGLp#^w}!#GX((8LtrOJ{Dfn<|wP5r>_1oO@YWa9zkj; z8I*FmuQfS8go*CCh)6_|E4%IJ-Z9y;ADLt#@2DoM^ZnPv*>5=l+MvUaoEjN-NIq}Z zX{adS$o)BxD;9hXG<#B_+#A|>8t#_$fZGIkltO&NoQ3}9+SxUi$df>eq0r~!8pM7X zM;&;q<2^##eE{Pw_eR#c-OLy);SCj?J$QPVyrVCF11M4YlUx1gQ-&%42y2hnMA#HK zw$HPKRjB5@cjPVzAn(v!b8W)!CVp)+`Y@-Df`+{baE$*mF%~A$PXr^unPhMwXn7AQVf$F!~`Yk4Q26mt%A*d0+N3J zf%F~g7|$h5rLkEZcq%I-%}+rrd(Z=(gh{3d+@N_iGZ)7^h~_30al8Ov=Bwn&)I8Ou z*r~{@6lQKH{%rS5-8`>(*~XFrurQu2th&S#l+whHm-Mazw#LMem#FiP&7 zq3I6*3n0pU0hi7EADH;@GY!nHNM2rqY|UC(AA|Y(K#?Z?PU0MvZ%X@~*u5#_0swGb z3w7I8M>nkQoq`Q6g*!_~uVT8H?87r4?s4eFe?dvV4-PP8^ZaNC_XE-S?D|1kKcRA4 z$a#rY)N?1%ykU_hfZ_mz367X@SJz?_)a=>+r|>CnC@8z(bwzWm@J&myW*drkbK-(8 zdwG2V`eFZse{XVCmli%ydzobpxtmS?fC3glziOZLOd~#Yc7B&q^7%ZuGz^drLC~wef zF5i3!6D6jQ^!fofI6GzK+;0%*cx)O_dVqX~Ea^T+PHT>~CS#Ly{>nxh8xbm&Kj>~q zT=-yV+$;OB`AmXCT@!ayQ`e3FK;fak(`pW(k9pMf0C;_ZC5uyqwuSo3U~VEuhN^kl zF=%zHKmgtmV1>VMH)pE#4Uk@=Lq-F6u>2QdbiZ&14W#e|q}T2> za?JbCsGFRUMdAA>n=DrmXy*maZmGF_l4rIVXrcVca^$~UK)`zs%1<;x?f|tkl6qJC z#v0B;EXqQy>^)OpmK+V6imq-XR2ayB8>P(tD`NhwegL!!0-#-b=h`c(xtRqg;Wo9D zRmZI}{%jXu;Jmck zBjNDh4j3@&egLdXd|3o${?}~&Od=lwEQ+^rJ>^%M@BeZC{|k!uuWx7<0E>z>^-u?T zwf*<#fg%8`nLES#_0<36j{s1n4Pdd#g3@_F>HfdQ{Ix_d6VPsVz?uH(aX;@r3kE*v z@B#>A4nx|&ahsNZ-=F P{_BCF`roDU=0X1loZVfM literal 0 HcmV?d00001 From 28096581990969961f53d18988e4edf4bbc907e1 Mon Sep 17 00:00:00 2001 From: Naorem Khogendro Singh Date: Tue, 10 Sep 2024 20:54:20 -0700 Subject: [PATCH 07/75] [PLAT-15145] Consume universe marker field for node-agent installation to decide the client type Summary: The changes are 1. NodeAgentClient.isClientEnabled() consumes the universe marker field to decide if node-agent can be used at the moment. 2. 
Universe flag disableNodeAgent changes from false -> true -> false during migration. On YBA restart, a migrated universe must not be considered again. Universe tasks take care of it once the migration is done. 3. For an old provider, once the migration is done for a universe, further node addition must directly install node agent instead of depending on the background job. Moreover, a new universe creation installs node-agents. Test Plan: UTs passed. Manual Tests: Case 1: 1. Created a universe with old provider with node agent client enabled (default) without enabling the node agent enabler (scannerInterval = 0). 2. Enable node agent enabler and restart YBA. 3. Verified that node agents got installed and migration finished in the background. 4. Restarted YBA to verify that the marker is not set again. 5. Edited the universe to add a node. 6. Verified that the node gets node-agent installed in the task itself without the need for background migration. 7. Verified that health-checks and slow queries are run via node-agent. 8. Verified that a new universe creation installs node-agent. Case 2: 1. Created a universe with new provider with node agent client disabled. 2. Creation completed with SSH. 3. Enabled node agent client. 4. Observed that node agent got installed in the background and migration completed. 5. Verified that health-checks and slow queries are run via node-agent. Further tests will be done for corner cases and more scenarios + itests should pass. Note: Currently, this feature is disabled. 
Reviewers: amalyshev, sanketh, svarshney Reviewed By: svarshney Subscribers: yugaware Differential Revision: https://phorge.dev.yugabyte.com/D37959 --- .../yw/commissioner/AbstractTaskBase.java | 6 +- .../yw/commissioner/NodeAgentEnabler.java | 169 ++++++++++++++---- .../yw/commissioner/NodeAgentPoller.java | 5 +- .../tasks/EnableNodeAgentInUniverse.java | 2 +- .../commissioner/tasks/UniverseTaskBase.java | 31 ++-- .../yugabyte/yw/common/NodeAgentClient.java | 65 +++++-- .../com/yugabyte/yw/common/NodeManager.java | 9 +- .../yw/common/NodeUniverseManager.java | 2 +- .../yugabyte/yw/common/TemplateManager.java | 2 +- .../com/yugabyte/yw/models/NodeAgent.java | 11 +- .../models/helpers/NodeConfigValidator.java | 7 +- .../yw/commissioner/NodeAgentEnablerTest.java | 20 ++- .../yw/common/NodeAgentClientTest.java | 8 +- .../yugabyte/yw/common/NodeManagerTest.java | 2 +- 14 files changed, 249 insertions(+), 90 deletions(-) diff --git a/managed/src/main/java/com/yugabyte/yw/commissioner/AbstractTaskBase.java b/managed/src/main/java/com/yugabyte/yw/commissioner/AbstractTaskBase.java index fb4d6358f7f5..340978d47596 100644 --- a/managed/src/main/java/com/yugabyte/yw/commissioner/AbstractTaskBase.java +++ b/managed/src/main/java/com/yugabyte/yw/commissioner/AbstractTaskBase.java @@ -68,7 +68,7 @@ public abstract class AbstractTaskBase implements ITask { // A field used to send additional information with prometheus metric associated with this task public String taskInfo = ""; - protected final Application application; + private final Application application; protected final play.Environment environment; protected final Config config; protected final ConfigHelper configHelper; @@ -353,4 +353,8 @@ protected UUID getTaskUUID() { protected TaskCache getTaskCache() { return getRunnableTask().getTaskCache(); } + + protected T getInstanceOf(Class clazz) { + return application.injector().instanceOf(clazz); + } } diff --git 
a/managed/src/main/java/com/yugabyte/yw/commissioner/NodeAgentEnabler.java b/managed/src/main/java/com/yugabyte/yw/commissioner/NodeAgentEnabler.java index 13b3bd3015fd..662fe6924161 100644 --- a/managed/src/main/java/com/yugabyte/yw/commissioner/NodeAgentEnabler.java +++ b/managed/src/main/java/com/yugabyte/yw/commissioner/NodeAgentEnabler.java @@ -2,8 +2,11 @@ package com.yugabyte.yw.commissioner; +import static com.google.common.base.Preconditions.checkState; + import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Stopwatch; +import com.google.common.collect.ImmutableSet; import com.google.common.util.concurrent.ThreadFactoryBuilder; import com.yugabyte.yw.commissioner.Common.CloudType; import com.yugabyte.yw.common.PlatformExecutorFactory; @@ -25,6 +28,7 @@ import java.util.ArrayList; import java.util.Comparator; import java.util.Date; +import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; @@ -39,7 +43,10 @@ import java.util.concurrent.RejectedExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicReference; import java.util.function.Function; +import java.util.function.Predicate; +import java.util.function.Supplier; import java.util.stream.Collectors; import javax.inject.Inject; import javax.inject.Singleton; @@ -53,7 +60,7 @@ * Auto node agent enabler running in the background to migrate universes to node agents. The first * step is to mark the universes pending node agent installations. As long as the marker is present, * the universe cannot use node agents for communication. The marker can also be set externally when - * a new node is added while client is disabled for the provider.* + * a new node is added while client is disabled for the provider. 
*/ public class NodeAgentEnabler { private static final String UNIVERSE_INSTALLER_POOL_NAME = @@ -66,9 +73,8 @@ public class NodeAgentEnabler { private final PlatformScheduler platformScheduler; private final NodeAgentInstaller nodeAgentInstaller; private final Map customerNodeAgentInstallers; - // This controls the number of customers as only one universe is picked at a time for each - // customer. private ExecutorService universeInstallerExecutor; + private volatile boolean enabled; @Inject public NodeAgentEnabler( @@ -84,12 +90,14 @@ public NodeAgentEnabler( } public void init() { + checkState(!isEnabled(), "Node agent enabler is already enabled"); Duration scannerInterval = confGetter.getGlobalConf(GlobalConfKeys.nodeAgentEnablerScanInterval); if (scannerInterval.isZero()) { log.info("Node agent enabler is disabled because the scanner interval is to zero"); return; } + enable(); // Mark the eligible universes on init. // TODO we may not want to run this everytime on startup. Will be fixed in subsequent tasks. markUniverses(); @@ -109,27 +117,78 @@ public void init() { * quickly decide if the universe can use node-agent or not. As long as the marker field is set to * true, the universe has nodes pending node-agent installation. 
*/ - public void markUniverses() { + @VisibleForTesting + void markUniverses() { Customer.getAll() .forEach( - c -> - c.getUniverses().stream() - .filter(u -> !u.getUniverseDetails().disableNodeAgent) - .filter( - u -> { - Optional optional = isNodeAgentEnabled(u); - return optional.isPresent() && optional.get() == false; - }) - .forEach(u -> markUniverse(u.getUniverseUUID()))); + c -> { + AtomicReference> cachedIps = new AtomicReference<>(); + Supplier> supplier = + () -> { + Set ips = cachedIps.get(); + if (ips == null) { + ips = + NodeAgent.getAll(c.getUuid()).stream() + .filter(NodeAgent::isActive) + .map(NodeAgent::getIp) + .collect(ImmutableSet.toImmutableSet()); + cachedIps.set(ips); + } + return ips; + }; + c.getUniverses().stream() + .filter(u -> !u.getUniverseDetails().disableNodeAgent) + .filter( + u -> { + Optional optional = + isNodeAgentEnabled(u, p -> true /* include provider flag */); + return optional.isPresent() && optional.get() == false; + }) + .filter( + u -> + u.getNodes().stream() + .anyMatch( + n -> + n.cloudInfo == null + || StringUtils.isEmpty(n.cloudInfo.private_ip) + || !supplier.get().contains(n.cloudInfo.private_ip))) + .forEach(u -> markUniverse(u.getUniverseUUID())); + }); + } + + /** + * Checks if node agent enabler is enabled. + * + * @return true if it is enabled else false. + */ + public boolean isEnabled() { + return enabled; + } + + @VisibleForTesting + void enable() { + enabled = true; } /** - * Checks if node agent is enabled for the universe. + * Checks if node agent is enabled for the universe during universe tasks like universe creation. + * For onprem provider, the provider flag is included in the check to detect old provider. * * @param universe the given universe. * @return empty for non-supported universes, true if node agent is enabled else false. 
*/ public Optional isNodeAgentEnabled(Universe universe) { + // Provider flag is always included for onprem irrespective of the enabler state to be lenient + // on new node addition. It is up to the caller to further check the universe field to verify if + // the node agent client is immediately available. + return isNodeAgentEnabled(universe, p -> p.getCloudCode() == CloudType.onprem || !isEnabled()); + } + + // This checks if node agent is enabled for the universe with the optional parameter to include or + // exclude the flag or field set in provider details. + private Optional isNodeAgentEnabled( + Universe universe, Predicate includeProviderFlag) { + Map providerEnabledMap = new HashMap<>(); for (Cluster cluster : universe.getUniverseDetails().clusters) { if (cluster.userIntent == null || cluster.userIntent.providerType == CloudType.kubernetes @@ -137,24 +196,48 @@ public Optional isNodeAgentEnabled(Universe universe) { // Unsupported cluster is found. return Optional.empty(); } - Provider provider = Provider.getOrBadRequest(UUID.fromString(cluster.userIntent.provider)); - if (!provider.getDetails().enableNodeAgent - || !confGetter.getConfForScope(provider, ProviderConfKeys.enableNodeAgentClient)) { + boolean enabled = + providerEnabledMap.computeIfAbsent( + cluster.userIntent.provider, + k -> { + Provider provider = + Provider.getOrBadRequest(UUID.fromString(cluster.userIntent.provider)); + if (!confGetter.getConfForScope(provider, ProviderConfKeys.enableNodeAgentClient)) { + log.debug("Node agent is not enabled for provider {}", provider.getUuid()); + return false; + } + if (includeProviderFlag != null + && includeProviderFlag.test(provider) + && !provider.getDetails().isEnableNodeAgent()) { + log.debug("Node agent is not enabled for old provider {}", provider.getUuid()); + return false; + } + return true; + }); + if (!enabled) { return Optional.of(false); } } return Optional.of(universe.getUniverseDetails().clusters.size() > 0); } - public static void 
markUniverse(UUID universeUuid) { - Universe.saveUniverseDetails( - universeUuid, - null /* version increment CB */, - u -> { - UniverseDefinitionTaskParams d = u.getUniverseDetails(); - d.disableNodeAgent = true; - u.setUniverseDetails(d); - }); + /** + * Mark universe to disable node agent only if the node agent enabler is enabled. + * + * @param universeUuid the given universe UUID. + */ + public void markUniverse(UUID universeUuid) { + if (isEnabled()) { + Universe.saveUniverseDetails( + universeUuid, + null /* version increment CB */, + u -> { + UniverseDefinitionTaskParams d = u.getUniverseDetails(); + d.disableNodeAgent = true; + u.setUniverseDetails(d); + }); + log.debug("Marked universe {} to disable node agent", universeUuid); + } } // Used only for testing. @@ -250,11 +333,12 @@ void scanUniverses() { customer.getUuid()); continue; } - if (!shouldInstallNodeAgents(universe)) { - log.debug( + if (!shouldInstallNodeAgents(universe, false /* Ignore universe lock */)) { + log.trace( "Skipping installation for universe {} for customer {} as it is not eligible", universe.getName(), customer.getUuid()); + continue; } log.info( "Picking up universe {} ({}) for customer {} for installation", @@ -310,10 +394,18 @@ void waitFor(Duration timeout) throws TimeoutException, InterruptedException { } } - public boolean shouldInstallNodeAgents(Universe universe) { + /** + * Checks if node agents should be installed immediately on this universe. + * + * @param universe the universe to be checked. + * @param ignoreUniverseLock true to ignore universe lock, otherwise the check returns false if + * the universe is locked. + * @return true if node agents should be installed on the universe else false. 
+ */ + public boolean shouldInstallNodeAgents(Universe universe, boolean ignoreUniverseLock) { UniverseDefinitionTaskParams details = universe.getUniverseDetails(); if (!details.disableNodeAgent) { - log.debug( + log.trace( "Skipping installation for universe {} as marker is not set", universe.getUniverseUUID()); // No marker set to install node-agent. return false; @@ -323,6 +415,13 @@ public boolean shouldInstallNodeAgents(Universe universe) { // No marker set to install node-agent. return false; } + if (!ignoreUniverseLock && details.updateInProgress) { + log.debug( + "Skipping installation for universe {} as another task is already running", + universe.getUniverseUUID()); + // This only prevents starting installation but allows another task to run in parallel. + return false; + } if (universe.getNodes().stream().anyMatch(n -> n.state != NodeDetails.NodeState.Live)) { log.info( "Nodes cannot be processed for universe {} as there are non Live nodes", @@ -336,8 +435,7 @@ public boolean shouldInstallNodeAgents(Universe universe) { universe.getUniverseUUID()); return false; } - // Check if node agent client is disabled at the moment. - Optional optional = isNodeAgentEnabled(universe); + Optional optional = isNodeAgentEnabled(universe, p -> !isEnabled()); return optional.isPresent() && optional.get(); } @@ -415,8 +513,7 @@ public void run() { if (!nodeAgentOpt.isPresent()) { return nodeAgentInstaller.install(getCustomerUuid(), getUniverseUuid(), node); } - if (nodeAgentOpt.get().getState() == NodeAgent.State.REGISTERING - || nodeAgentOpt.get().getState() == NodeAgent.State.REGISTERED) { + if (!nodeAgentOpt.get().isActive()) { return nodeAgentInstaller.reinstall( getCustomerUuid(), getUniverseUuid(), node, nodeAgentOpt.get()); } @@ -443,8 +540,8 @@ public void run() { // nodes are deleted, migration will not happen due to installation failure and next cycle takes // care. 
private boolean processNodes(Universe universe, Function callback) { - if (!shouldInstallNodeAgents(universe)) { - log.info( + if (!shouldInstallNodeAgents(universe, false /* Ignore universe lock */)) { + log.trace( "Skipping installation for universe {} as it is not eligible", universe.getUniverseUUID()); return false; diff --git a/managed/src/main/java/com/yugabyte/yw/commissioner/NodeAgentPoller.java b/managed/src/main/java/com/yugabyte/yw/commissioner/NodeAgentPoller.java index 076b054ec762..abf4a3bf5f0b 100644 --- a/managed/src/main/java/com/yugabyte/yw/commissioner/NodeAgentPoller.java +++ b/managed/src/main/java/com/yugabyte/yw/commissioner/NodeAgentPoller.java @@ -305,8 +305,7 @@ private void upgradeNodeAgent(NodeAgent nodeAgent) { return; } nodeAgent.refresh(); - checkState( - nodeAgent.getState() != State.REGISTERING, "Invalid state " + nodeAgent.getState()); + checkState(nodeAgent.isActive(), "Invalid state " + nodeAgent.getState()); if (nodeAgent.getState() == State.READY) { if (checkVersion(nodeAgent)) { log.info( @@ -465,7 +464,7 @@ void pollerService() { String softwareVersion = nodeAgentManager.getSoftwareVersion(); Set nodeUuids = new HashSet<>(); NodeAgent.getAll().stream() - .filter(n -> n.getState() != State.REGISTERING) + .filter(n -> n.isActive()) .peek(n -> nodeUuids.add(n.getUuid())) .map(n -> getOrCreatePollerTask(n.getUuid(), lifetime, softwareVersion)) .filter(PollerTask::isSchedulable) diff --git a/managed/src/main/java/com/yugabyte/yw/commissioner/tasks/EnableNodeAgentInUniverse.java b/managed/src/main/java/com/yugabyte/yw/commissioner/tasks/EnableNodeAgentInUniverse.java index 807a6370f431..2169c6c6a242 100644 --- a/managed/src/main/java/com/yugabyte/yw/commissioner/tasks/EnableNodeAgentInUniverse.java +++ b/managed/src/main/java/com/yugabyte/yw/commissioner/tasks/EnableNodeAgentInUniverse.java @@ -24,7 +24,7 @@ protected EnableNodeAgentInUniverse( @Override protected void createPrecheckTasks(Universe universe) { - if 
(!nodeAgentEnabler.shouldInstallNodeAgents(universe)) { + if (!nodeAgentEnabler.shouldInstallNodeAgents(universe, true /* Ignore universe lock */)) { throw new IllegalStateException( String.format( "Universe %s is not in state to migrate to use node agents", diff --git a/managed/src/main/java/com/yugabyte/yw/commissioner/tasks/UniverseTaskBase.java b/managed/src/main/java/com/yugabyte/yw/commissioner/tasks/UniverseTaskBase.java index 9ed0f58b4d56..ff50c2fcfe6e 100644 --- a/managed/src/main/java/com/yugabyte/yw/commissioner/tasks/UniverseTaskBase.java +++ b/managed/src/main/java/com/yugabyte/yw/commissioner/tasks/UniverseTaskBase.java @@ -1905,13 +1905,7 @@ public SubTaskGroup createRemoveNodeAgentTasks( protected Collection filterNodesForInstallNodeAgent( Universe universe, Collection nodes) { - if (universe.getUniverseDetails().disableNodeAgent) { - log.info( - "Skipping node agent installation for universe {} as it is managed by node agent enabler", - universe.getUniverseUUID()); - return Collections.emptySet(); - } - NodeAgentClient nodeAgentClient = application.injector().instanceOf(NodeAgentClient.class); + NodeAgentClient nodeAgentClient = getInstanceOf(NodeAgentClient.class); Map clusterSkip = new HashMap<>(); return nodes.stream() .filter(n -> n.cloudInfo != null) @@ -1923,7 +1917,7 @@ protected Collection filterNodesForInstallNodeAgent( Cluster cluster = universe.getCluster(n.placementUuid); Provider provider = Provider.getOrBadRequest(UUID.fromString(cluster.userIntent.provider)); - if (!nodeAgentClient.isClientEnabled(provider)) { + if (!nodeAgentClient.isClientEnabled(provider, universe)) { return false; } if (provider.getCloudCode() == CloudType.onprem) { @@ -1980,17 +1974,11 @@ public SubTaskGroup createInstallNodeAgentTasks( } int serverPort = confGetter.getGlobalConf(GlobalConfKeys.nodeAgentServerPort); Universe universe = getUniverse(); - NodeAgentEnabler nodeAgentEnabler = application.injector().instanceOf(NodeAgentEnabler.class); - Optional 
optional = nodeAgentEnabler.isNodeAgentEnabled(universe); - if (!optional.isPresent()) { - log.info("Node agent is not supported on this universe {}", universe.getUniverseUUID()); - return subTaskGroup; - } - if (optional.get() == false) { + if (!getInstanceOf(NodeAgentClient.class).isClientEnabled(universe)) { log.info( - "Skipping node agent installation for universe {} as it is not enabled", + "Skipping node agent installation for universe {} as client is not enabled", universe.getUniverseUUID()); - NodeAgentEnabler.markUniverse(universe.getUniverseUUID()); + getInstanceOf(NodeAgentEnabler.class).markUniverse(universe.getUniverseUUID()); return subTaskGroup; } Customer customer = Customer.get(universe.getCustomerId()); @@ -2030,7 +2018,7 @@ public SubTaskGroup createInstallNodeAgentTasks( protected void deleteNodeAgent(NodeDetails nodeDetails) { if (nodeDetails.cloudInfo != null && nodeDetails.cloudInfo.private_ip != null) { - NodeAgentManager nodeAgentManager = application.injector().instanceOf(NodeAgentManager.class); + NodeAgentManager nodeAgentManager = getInstanceOf(NodeAgentManager.class); Cluster cluster = getUniverse().getCluster(nodeDetails.placementUuid); Provider provider = Provider.getOrBadRequest(UUID.fromString(cluster.userIntent.provider)); if (provider.getCloudCode() == CloudType.onprem) { @@ -2047,14 +2035,15 @@ protected void deleteNodeAgent(NodeDetails nodeDetails) { public SubTaskGroup createWaitForNodeAgentTasks(Collection nodes) { SubTaskGroup subTaskGroup = createSubTaskGroup(WaitForNodeAgent.class.getSimpleName()); - NodeAgentClient nodeAgentClient = application.injector().instanceOf(NodeAgentClient.class); + NodeAgentClient nodeAgentClient = getInstanceOf(NodeAgentClient.class); for (NodeDetails node : nodes) { if (node.cloudInfo == null) { continue; } - Cluster cluster = getUniverse().getCluster(node.placementUuid); + Universe universe = getUniverse(); + Cluster cluster = universe.getCluster(node.placementUuid); Provider provider = 
Provider.getOrBadRequest(UUID.fromString(cluster.userIntent.provider)); - if (nodeAgentClient.isClientEnabled(provider)) { + if (nodeAgentClient.isClientEnabled(provider, universe)) { WaitForNodeAgent.Params params = new WaitForNodeAgent.Params(); params.nodeName = node.nodeName; params.azUuid = node.azUuid; diff --git a/managed/src/main/java/com/yugabyte/yw/common/NodeAgentClient.java b/managed/src/main/java/com/yugabyte/yw/common/NodeAgentClient.java index 4c2e8cfd0f8c..d29a237d0738 100644 --- a/managed/src/main/java/com/yugabyte/yw/common/NodeAgentClient.java +++ b/managed/src/main/java/com/yugabyte/yw/common/NodeAgentClient.java @@ -13,6 +13,7 @@ import com.google.inject.Inject; import com.google.protobuf.ByteString; import com.typesafe.config.Config; +import com.yugabyte.yw.commissioner.NodeAgentEnabler; import com.yugabyte.yw.common.certmgmt.CertificateHelper; import com.yugabyte.yw.common.config.GlobalConfKeys; import com.yugabyte.yw.common.config.ProviderConfKeys; @@ -20,6 +21,7 @@ import com.yugabyte.yw.models.NodeAgent; import com.yugabyte.yw.models.NodeAgent.State; import com.yugabyte.yw.models.Provider; +import com.yugabyte.yw.models.Universe; import com.yugabyte.yw.nodeagent.NodeAgentGrpc; import com.yugabyte.yw.nodeagent.NodeAgentGrpc.NodeAgentBlockingStub; import com.yugabyte.yw.nodeagent.NodeAgentGrpc.NodeAgentStub; @@ -79,6 +81,7 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; +import javax.annotation.Nullable; import javax.inject.Singleton; import javax.net.ssl.SSLException; import lombok.Builder; @@ -106,27 +109,34 @@ public class NodeAgentClient { private final Config appConfig; private final ChannelFactory channelFactory; - private final RuntimeConfGetter confGetter; + // Late binding to prevent circular dependency. 
+ private final com.google.inject.Provider nodeAgentEnablerProvider; @Inject public NodeAgentClient( Config appConfig, RuntimeConfGetter confGetter, + com.google.inject.Provider nodeAgentEnablerProvider, PlatformExecutorFactory platformExecutorFactory) { this( appConfig, confGetter, + nodeAgentEnablerProvider, platformExecutorFactory.createExecutor( "node_agent.grpc_executor", new ThreadFactoryBuilder().setNameFormat("NodeAgentGrpcPool-%d").build())); } public NodeAgentClient( - Config appConfig, RuntimeConfGetter confGetter, ExecutorService executorService) { + Config appConfig, + RuntimeConfGetter confGetter, + com.google.inject.Provider nodeAgentEnablerProvider, + ExecutorService executorService) { this( appConfig, confGetter, + nodeAgentEnablerProvider, config -> ChannelFactory.getDefaultChannel( config, @@ -135,9 +145,13 @@ public NodeAgentClient( } public NodeAgentClient( - Config appConfig, RuntimeConfGetter confGetter, ChannelFactory channelFactory) { + Config appConfig, + RuntimeConfGetter confGetter, + com.google.inject.Provider nodeAgentEnablerProvider, + ChannelFactory channelFactory) { this.appConfig = appConfig; this.confGetter = confGetter; + this.nodeAgentEnablerProvider = nodeAgentEnablerProvider; this.channelFactory = channelFactory; this.cachedChannels = CacheBuilder.newBuilder() @@ -484,23 +498,52 @@ public void addNodeAgentClientParams( redactedVals.put(token, "REDACTED"); } - public Optional maybeGetNodeAgent(String ip, Provider provider) { - if (isClientEnabled(provider)) { + public Optional maybeGetNodeAgent( + String ip, Provider provider, @Nullable Universe universe) { + if (isClientEnabled(provider, universe)) { Optional optional = NodeAgent.maybeGetByIp(ip); - if (optional.isPresent() && optional.get().getState() != State.REGISTERING) { + if (optional.isPresent() && optional.get().isActive()) { return optional; } } return Optional.empty(); } - public boolean isClientEnabled(Provider provider) { - return 
provider.getDetails().isEnableNodeAgent() - && confGetter.getConfForScope(provider, ProviderConfKeys.enableNodeAgentClient); + /* Passing universe allows more specific check for the universe. */ + public boolean isClientEnabled(Provider provider, @Nullable Universe universe) { + boolean clientEnabled = + confGetter.getConfForScope(provider, ProviderConfKeys.enableNodeAgentClient); + if (!clientEnabled) { + log.debug("Node agent client is disabled for provider {}", provider.getUuid()); + return false; + } + if (!nodeAgentEnablerProvider.get().isEnabled()) { + log.debug("Node agent client is disabled for old provider {}", provider.getUuid()); + return provider.getDetails().isEnableNodeAgent(); + } + return universe == null || isClientEnabled(universe); + } + + public boolean isClientEnabled(Universe universe) { + // Check this first before the universe field to allow the change to be reflected immediately. + Optional optional = nodeAgentEnablerProvider.get().isNodeAgentEnabled(universe); + if (optional.isPresent() && !optional.get()) { + log.debug("Node agent client is disabled for universe {}", universe.getUniverseUUID()); + return false; + } + if (nodeAgentEnablerProvider.get().isEnabled() + && universe.getUniverseDetails().disableNodeAgent) { + log.debug( + "Node agent client is disabled for universe {} pending background installation", + universe.getUniverseUUID()); + return false; + } + return true; } - public boolean isAnsibleOffloadingEnabled(NodeAgent nodeAgent, Provider provider) { - if (!isClientEnabled(provider)) { + public boolean isAnsibleOffloadingEnabled( + NodeAgent nodeAgent, Provider provider, @Nullable Universe universe) { + if (!isClientEnabled(provider, universe)) { return false; } if (!confGetter.getConfForScope(provider, ProviderConfKeys.enableAnsibleOffloading)) { diff --git a/managed/src/main/java/com/yugabyte/yw/common/NodeManager.java b/managed/src/main/java/com/yugabyte/yw/common/NodeManager.java index ec5fe7bcb0e5..2f19ed6fc8d0 100644 --- 
a/managed/src/main/java/com/yugabyte/yw/common/NodeManager.java +++ b/managed/src/main/java/com/yugabyte/yw/common/NodeManager.java @@ -470,7 +470,7 @@ private List getAccessKeySpecificCommand( && ((ManageOtelCollector.Params) params).installOtelCollector; if (provider.getCloudCode() == CloudType.onprem && providerDetails.skipProvisioning - && getNodeAgentClient().isClientEnabled(provider) + && getNodeAgentClient().isClientEnabled(provider, null /* Universe */) && !installOtelCol) { subCommand.add("--ssh_user"); subCommand.add("yugabyte"); @@ -1600,7 +1600,7 @@ public ShellResponse detachedNodeCommand( NodeInstanceData instanceData = nodeInstance.getDetails(); if (StringUtils.isNotBlank(instanceData.ip)) { getNodeAgentClient() - .maybeGetNodeAgent(instanceData.ip, provider) + .maybeGetNodeAgent(instanceData.ip, provider, null /* universe */) .ifPresent( nodeAgent -> { if (nodeAgentPoller.upgradeNodeAgent(nodeAgent.getUuid(), true)) { @@ -1743,7 +1743,7 @@ private void addNodeAgentCommandArgs( if (StringUtils.isNotBlank(nodeIp) && StringUtils.isNotBlank(userIntent.provider)) { Provider provider = Provider.getOrBadRequest(UUID.fromString(userIntent.provider)); getNodeAgentClient() - .maybeGetNodeAgent(nodeIp, provider) + .maybeGetNodeAgent(nodeIp, provider, universe) .ifPresent( nodeAgent -> { if (nodeAgentPoller.upgradeNodeAgent(nodeAgent.getUuid(), true)) { @@ -1751,7 +1751,8 @@ private void addNodeAgentCommandArgs( } commandArgs.add("--connection_type"); commandArgs.add("node_agent_rpc"); - if (getNodeAgentClient().isAnsibleOffloadingEnabled(nodeAgent, provider)) { + if (getNodeAgentClient() + .isAnsibleOffloadingEnabled(nodeAgent, provider, universe)) { commandArgs.add("--offload_ansible"); } nodeAgentClient.addNodeAgentClientParams(nodeAgent, commandArgs, redactedVals); diff --git a/managed/src/main/java/com/yugabyte/yw/common/NodeUniverseManager.java b/managed/src/main/java/com/yugabyte/yw/common/NodeUniverseManager.java index f74f4495b0ed..5494bc818fac 100644 
--- a/managed/src/main/java/com/yugabyte/yw/common/NodeUniverseManager.java +++ b/managed/src/main/java/com/yugabyte/yw/common/NodeUniverseManager.java @@ -530,7 +530,7 @@ private Optional maybeGetNodeAgent( UUID providerUUID = UUID.fromString(cluster.userIntent.provider); Provider provider = Provider.getOrBadRequest(providerUUID); Optional optional = - getNodeAgentClient().maybeGetNodeAgent(node.cloudInfo.private_ip, provider); + getNodeAgentClient().maybeGetNodeAgent(node.cloudInfo.private_ip, provider, universe); if (!optional.isPresent()) { log.debug( "Node agent is not enabled for provider {}({})", provider.getName(), provider.getUuid()); diff --git a/managed/src/main/java/com/yugabyte/yw/common/TemplateManager.java b/managed/src/main/java/com/yugabyte/yw/common/TemplateManager.java index b8d269d11998..6fee2433ea45 100644 --- a/managed/src/main/java/com/yugabyte/yw/common/TemplateManager.java +++ b/managed/src/main/java/com/yugabyte/yw/common/TemplateManager.java @@ -121,7 +121,7 @@ public void createProvisionTemplate( } } - if (nodeAgentClient.isClientEnabled(provider)) { + if (nodeAgentClient.isClientEnabled(provider, null /* Universe */)) { commandArgs.add("--install_node_agent"); commandArgs.add("--node_agent_port"); commandArgs.add(String.valueOf(confGetter.getGlobalConf(GlobalConfKeys.nodeAgentServerPort))); diff --git a/managed/src/main/java/com/yugabyte/yw/models/NodeAgent.java b/managed/src/main/java/com/yugabyte/yw/models/NodeAgent.java index eb7faed7fe16..f4945eb09c17 100644 --- a/managed/src/main/java/com/yugabyte/yw/models/NodeAgent.java +++ b/managed/src/main/java/com/yugabyte/yw/models/NodeAgent.java @@ -69,10 +69,12 @@ @Setter @ApiModel(description = "Node agent details") public class NodeAgent extends Model { - public static final KeyLock NODE_AGENT_KEY_LOCK = new KeyLock(); public static final String NODE_AGENT_DIR = "node-agent"; + private static final Set INACTIVE_STATES = + ImmutableSet.of(State.REGISTERING, State.REGISTERED); + /** Node 
agent server OS type. */ public enum OSType { DARWIN, @@ -302,7 +304,7 @@ public static Collection list(UUID customerUuid, String nodeAgentIp / return expr.findList(); } - public static Set getNodeAgents(UUID customerUuid) { + public static Set getAll(UUID customerUuid) { return finder.query().where().eq("customer_uuid", customerUuid).findSet(); } @@ -487,6 +489,11 @@ public Path getServerKeyFilePath() { return getCertDirPath().resolve(SERVER_KEY_NAME); } + @JsonIgnore + public boolean isActive() { + return !INACTIVE_STATES.contains(getState()); + } + public void updateCertDirPath(Path certDirPath) { updateCertDirPath(certDirPath, null); } diff --git a/managed/src/main/java/com/yugabyte/yw/models/helpers/NodeConfigValidator.java b/managed/src/main/java/com/yugabyte/yw/models/helpers/NodeConfigValidator.java index db83b70a05e5..f6a06acb581a 100644 --- a/managed/src/main/java/com/yugabyte/yw/models/helpers/NodeConfigValidator.java +++ b/managed/src/main/java/com/yugabyte/yw/models/helpers/NodeConfigValidator.java @@ -146,7 +146,8 @@ public Map validateNodeConfigs( boolean canConnect = sshIntoNode(provider, nodeData, operation); nodeConfigs.add(new NodeConfig(Type.SSH_ACCESS, String.valueOf(canConnect))); - if (operation == Operation.CONFIGURE && nodeAgentClient.isClientEnabled(provider)) { + if (operation == Operation.CONFIGURE + && nodeAgentClient.isClientEnabled(provider, null /* Universe */)) { canConnect = connectToNodeAgent(provider, nodeData, operation); nodeConfigs.add(new NodeConfig(Type.NODE_AGENT_ACCESS, String.valueOf(canConnect))); } @@ -349,12 +350,12 @@ private boolean isNodeConfigRequired(ValidationData input) { case SSH_ACCESS: { return input.getOperation() == Operation.PROVISION - || !nodeAgentClient.isClientEnabled(provider); + || !nodeAgentClient.isClientEnabled(provider, null /* Universe */); } case NODE_AGENT_ACCESS: { return input.getOperation() == Operation.CONFIGURE - && nodeAgentClient.isClientEnabled(provider); + && 
nodeAgentClient.isClientEnabled(provider, null /* Universe */); } case VM_MAX_MAP_COUNT: { diff --git a/managed/src/test/java/com/yugabyte/yw/commissioner/NodeAgentEnablerTest.java b/managed/src/test/java/com/yugabyte/yw/commissioner/NodeAgentEnablerTest.java index 7c28bb93a950..3fc6fb1b8f0e 100644 --- a/managed/src/test/java/com/yugabyte/yw/commissioner/NodeAgentEnablerTest.java +++ b/managed/src/test/java/com/yugabyte/yw/commissioner/NodeAgentEnablerTest.java @@ -63,7 +63,6 @@ public class NodeAgentEnablerTest extends FakeDBApplication { private NodeAgentInstaller mockNodeAgentInstaller; private NodeAgentEnabler nodeAgentEnabler; private TestUniverseTaskBase universeTaskBase; - private BaseTaskDependencies baseTaskDependencies; @Before public void setUp() { @@ -81,14 +80,16 @@ public void setUp() { settableRuntimeConfigFactory = app.injector().instanceOf(SettableRuntimeConfigFactory.class); platformExecutorFactory = app.injector().instanceOf(PlatformExecutorFactory.class); platformScheduler = app.injector().instanceOf(PlatformScheduler.class); - baseTaskDependencies = app.injector().instanceOf(BaseTaskDependencies.class); executorService = Executors.newCachedThreadPool(); mockNodeAgentInstaller = mock(NodeAgentInstaller.class); nodeAgentEnabler = new NodeAgentEnabler( confGetter, platformExecutorFactory, platformScheduler, mockNodeAgentInstaller); nodeAgentEnabler.setUniverseInstallerExecutor(executorService); - universeTaskBase = new TestUniverseTaskBase(baseTaskDependencies); + nodeAgentEnabler.enable(); + universeTaskBase = + new TestUniverseTaskBase( + app.injector().instanceOf(BaseTaskDependencies.class), nodeAgentEnabler); } @After @@ -99,14 +100,25 @@ public void tearDown() { } private static class TestUniverseTaskBase extends UniverseTaskBase { + private final NodeAgentEnabler nodeAgentEnabler; private final RunnableTask runnableTask; - public TestUniverseTaskBase(BaseTaskDependencies baseTaskDependencies) { + public TestUniverseTaskBase( + 
BaseTaskDependencies baseTaskDependencies, NodeAgentEnabler nodeAgentEnabler) { super(baseTaskDependencies); + this.nodeAgentEnabler = nodeAgentEnabler; runnableTask = mock(RunnableTask.class); taskParams = mock(UniverseTaskParams.class); } + @Override + protected T getInstanceOf(Class clazz) { + if (clazz == NodeAgentEnabler.class) { + return clazz.cast(nodeAgentEnabler); + } + return super.getInstanceOf(clazz); + } + public UniverseTaskParams getMockParams() { return (UniverseTaskParams) super.taskParams(); } diff --git a/managed/src/test/java/com/yugabyte/yw/common/NodeAgentClientTest.java b/managed/src/test/java/com/yugabyte/yw/common/NodeAgentClientTest.java index 78aa0f1c8abf..b4eb01ac2c71 100644 --- a/managed/src/test/java/com/yugabyte/yw/common/NodeAgentClientTest.java +++ b/managed/src/test/java/com/yugabyte/yw/common/NodeAgentClientTest.java @@ -10,6 +10,7 @@ import com.google.common.collect.ImmutableList; import com.google.protobuf.ByteString; import com.typesafe.config.Config; +import com.yugabyte.yw.commissioner.NodeAgentEnabler; import com.yugabyte.yw.common.NodeAgentClient.NodeAgentUpgradeParam; import com.yugabyte.yw.common.config.RuntimeConfGetter; import com.yugabyte.yw.controllers.handlers.NodeAgentHandler; @@ -51,6 +52,7 @@ public class NodeAgentClientTest extends FakeDBApplication { private NodeAgentClient nodeAgentClient; private NodeAgentHandler nodeAgentHandler; + private NodeAgentEnabler nodeAgentEnabler; private Customer customer; private NodeAgent nodeAgent; private NodeAgentImplBase nodeAgentImpl; @@ -137,7 +139,11 @@ public void update( grpcCleanup.register(InProcessChannelBuilder.forName(serverName).directExecutor().build()); nodeAgentClient = - new NodeAgentClient(mock(Config.class), mock(RuntimeConfGetter.class), config -> channel); + new NodeAgentClient( + mock(Config.class), + mock(RuntimeConfGetter.class), + com.google.inject.util.Providers.of(mock(NodeAgentEnabler.class)), + config -> channel); } static class 
UploadFileRequestObserver implements StreamObserver { diff --git a/managed/src/test/java/com/yugabyte/yw/common/NodeManagerTest.java b/managed/src/test/java/com/yugabyte/yw/common/NodeManagerTest.java index f05fd61ec94c..94129b509f58 100644 --- a/managed/src/test/java/com/yugabyte/yw/common/NodeManagerTest.java +++ b/managed/src/test/java/com/yugabyte/yw/common/NodeManagerTest.java @@ -530,7 +530,7 @@ public void setUp() { when(runtimeConfigFactory.forProvider(any())).thenReturn(mockConfig); when(runtimeConfigFactory.forUniverse(any())).thenReturn(app.config()); when(runtimeConfigFactory.globalRuntimeConf()).thenReturn(mockConfig); - when(nodeAgentClient.maybeGetNodeAgent(any(), any())).thenReturn(Optional.empty()); + when(nodeAgentClient.maybeGetNodeAgent(any(), any(), any())).thenReturn(Optional.empty()); createTempFile("node_manager_test_ca.crt", "test-cert"); when(mockConfGetter.getConfForScope(any(Universe.class), eq(UniverseConfKeys.nfsDirs))) .thenReturn("/tmp/nfs,/nfs"); From a3c7a31ff637dba2784aeb31404df4b6023b8cf1 Mon Sep 17 00:00:00 2001 From: Muthu Chidambaram Date: Fri, 13 Sep 2024 06:33:09 +0000 Subject: [PATCH 08/75] [PLAT-15052][PLAT-8144] Airgap improvement to preprovision Summary: Issues in customer environments with airgap provisioning flows on cloud providers. The particular difference comes with running preprovision for cloud providers and not onprem. This diff sets the air_gap extra var earlier in python code so Ansible has access to it and skips GPG key and other package install steps. Test Plan: Run cloud provider airgap provisioning, ensure success and relevant tasks are skipped. 
Reviewers: svarshney, anijhawan, sanketh Reviewed By: svarshney Subscribers: yugaware Differential Revision: https://phorge.dev.yugabyte.com/D38032 --- managed/devops/opscli/ybops/cloud/common/method.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/managed/devops/opscli/ybops/cloud/common/method.py b/managed/devops/opscli/ybops/cloud/common/method.py index 38c825c00c61..7f89711c4fed 100644 --- a/managed/devops/opscli/ybops/cloud/common/method.py +++ b/managed/devops/opscli/ybops/cloud/common/method.py @@ -808,6 +808,9 @@ def callback(self, args): # copy and run the script self.cloud.execute_boot_script(args, self.extra_vars) + if args.air_gap: + self.extra_vars.update({"air_gap": args.air_gap}) + if not args.skip_preprovision: self.preprovision(args) self.extra_vars["device_names"] = self.get_device_names(args, host_info) @@ -815,8 +818,6 @@ def callback(self, args): self.extra_vars.update(self.get_server_host_port(host_info, args.custom_ssh_port)) if args.local_package_path: self.extra_vars.update({"local_package_path": args.local_package_path}) - if args.air_gap: - self.extra_vars.update({"air_gap": args.air_gap}) if args.node_exporter_port: self.extra_vars.update({"node_exporter_port": args.node_exporter_port}) if args.install_node_exporter: From 24a5af0a55ba171e2db2cf224e32e3afad5f6f56 Mon Sep 17 00:00:00 2001 From: Fizaa Luthra Date: Thu, 12 Sep 2024 11:43:47 -0400 Subject: [PATCH 09/75] [#3946] YSQL: Add support for ALTER TABLE ... VALIDATE CONSTRAINT Summary: `ALTER TABLE ... VALIDATE CONSTRAINT` is used to validate foreign-key and check constraints that were previously created using the `NOT VALID` syntax. The command performs the required constraint checking and marks the constraint as valid if successful, or returns an error if there is a violation. It will increment the schema version of the table and any of its children, without making any DocDB schema changes. 
The increment is done (similar to `ADD CONSTRAINT`, `SET NOT NULL` operations) to abort any concurrent writes with a schema version mismatch error. Note: some writes (that may cause constraint violations) may still slip through, but this is a well known issue that applies to other `ALTER`s as well and it will be fixed by table locking. Code changes: - Enable the command in the parser - YBCPrepareAlterTableCmd: prepare YB alter handles to perform a schema version increment for `AT_ValidateConstraint` and `AT_ValidateConstraintRecurse` commands. Jira: DB-1922 Test Plan: ./yb_build.sh --java-test 'org.yb.pgsql.TestAlterTableWithConcurrentTxn' ./yb_build.sh --java-test 'org.yb.pgsql.TestPgRegressTable' Reviewers: myang Reviewed By: myang Subscribers: yql Differential Revision: https://phorge.dev.yugabyte.com/D37907 --- .../TestAlterTableWithConcurrentTxn.java | 33 ++++++++++- src/postgres/src/backend/commands/ybccmds.c | 3 + src/postgres/src/backend/parser/gram.y | 1 - .../test/regress/expected/yb_alter_table.out | 44 ++++++++++++++ .../regress/expected/yb_pg_alter_table.out | 53 +++++++++++++++-- .../regress/expected/yb_pg_foreign_data.out | 12 ++-- .../src/test/regress/sql/yb_alter_table.sql | 40 +++++++++++++ .../test/regress/sql/yb_pg_alter_table.sql | 58 +++++++++++++++++++ src/yb/yql/pgwrapper/pg_fkey-test.cc | 12 +--- 9 files changed, 231 insertions(+), 25 deletions(-) diff --git a/java/yb-pgsql/src/test/java/org/yb/pgsql/TestAlterTableWithConcurrentTxn.java b/java/yb-pgsql/src/test/java/org/yb/pgsql/TestAlterTableWithConcurrentTxn.java index 8aae8277d121..cbdf9c8ea0f1 100644 --- a/java/yb-pgsql/src/test/java/org/yb/pgsql/TestAlterTableWithConcurrentTxn.java +++ b/java/yb-pgsql/src/test/java/org/yb/pgsql/TestAlterTableWithConcurrentTxn.java @@ -48,6 +48,8 @@ protected Map getMasterFlags() { "no owned sequence found"; private static final String NO_TUPLE_FOUND_ERROR = "could not find tuple for parent"; + private static final String CONSTRAINT_VIOLATION_ERROR = + 
"violates check constraint"; private static final String NO_ERROR = ""; private static final boolean executeDmlBeforeAlter = true; private static final boolean executeDmlAfterAlter = false; @@ -63,7 +65,8 @@ private static enum AlterCommand { ATTACH_PARTITION, DETACH_PARTITION, ADD_FOREIGN_KEY, DROP_FOREIGN_KEY, ADD_PRIMARY_KEY, DROP_PRIMARY_KEY, - ADD_COLUMN_WITH_VOLATILE_DEFAULT, ALTER_TYPE} + ADD_COLUMN_WITH_VOLATILE_DEFAULT, ALTER_TYPE, + VALIDATE_CONSTRAINT} private void prepareAndPopulateTable(AlterCommand alterCommand, String tableName) throws Exception { @@ -111,6 +114,9 @@ private void prepareAndPopulateTable(AlterCommand alterCommand, String tableName if (alterCommand == AlterCommand.ALTER_TYPE) { createTableQuery += ", d TEXT"; } + if (alterCommand == AlterCommand.VALIDATE_CONSTRAINT) { + createTableQuery += ", e INT"; + } createTableQuery += ")"; if (alterCommand == AlterCommand.ATTACH_PARTITION || alterCommand == AlterCommand.DETACH_PARTITION) { @@ -149,6 +155,9 @@ private void prepareAndPopulateTable(AlterCommand alterCommand, String tableName } else if (alterCommand == AlterCommand.DROP_FOREIGN_KEY) { statement.execute("CREATE TABLE " + tableName + "_f (a INT, " + "CONSTRAINT c FOREIGN KEY (a) REFERENCES " + tableName + "(a))"); + } else if (alterCommand == AlterCommand.VALIDATE_CONSTRAINT) { + statement.execute( + "ALTER TABLE " + tableName + " ADD CONSTRAINT check_valid CHECK (e > 0) NOT VALID"); } // Populate the table @@ -168,7 +177,8 @@ private void prepareAndPopulateTable(AlterCommand alterCommand, String tableName case DETACH_PARTITION: case ADD_PRIMARY_KEY: case DROP_PRIMARY_KEY: - case ADD_COLUMN_WITH_VOLATILE_DEFAULT: { + case ADD_COLUMN_WITH_VOLATILE_DEFAULT: + case VALIDATE_CONSTRAINT: { statement.execute("INSERT INTO " + tableName + " VALUES (1, 'foo')"); break; } @@ -281,6 +291,9 @@ private String getAlterSql(AlterCommand alterCommand, String tableName) throws E return rewriteTestFlag + "ALTER TABLE " + tableName + " ALTER COLUMN d TYPE 
int USING length(d)"; } + case VALIDATE_CONSTRAINT: { + return "ALTER TABLE " + tableName + " VALIDATE CONSTRAINT check_valid"; + } default: { throw new Exception("Alter command type " + alterCommand + " not supported"); } @@ -372,6 +385,13 @@ private String getDmlSql(Dml dmlToExecute, AlterCommand alterCommand, return "INSERT INTO " + tableName + " VALUES (2, 'bar', 6)"; } } + case VALIDATE_CONSTRAINT: { + if (useOriginalSchema) { + return "INSERT INTO " + tableName + " VALUES (2, 'bar')"; + } else { + return "INSERT INTO " + tableName + " VALUES (2, 'bar', -1)"; + } + } default: { throw new Exception("Alter command type " + alterCommand + " not supported"); } @@ -400,6 +420,7 @@ private String getDmlSql(Dml dmlToExecute, AlterCommand alterCommand, case DROP_PRIMARY_KEY: case ADD_COLUMN_WITH_VOLATILE_DEFAULT: case ALTER_TYPE: + case VALIDATE_CONSTRAINT: case ATTACH_PARTITION: return "SELECT a FROM " + tableName + " WHERE a = 1"; case DETACH_PARTITION: @@ -791,9 +812,15 @@ public void testDmlTransactionAfterAlterOnCurrentResourceWithoutCachedMetadata() assumeFalse(BasePgSQLTest.UNIQUE_PHYSICAL_CONNS_NEEDED, isTestRunningWithConnectionManager()); for (AlterCommand alterType : AlterCommand.values()) { + String expectedErrorOnInsert; + if (alterType == AlterCommand.VALIDATE_CONSTRAINT) { + expectedErrorOnInsert = CONSTRAINT_VIOLATION_ERROR; + } else { + expectedErrorOnInsert = NO_ERROR; + } LOG.info("Run INSERT txn after ALTER " + alterType + " cache set " + !cacheMetadataSetTrue); runDmlTxnWithAlterOnCurrentResource(Dml.INSERT, alterType, !cacheMetadataSetTrue, - executeDmlAfterAlter, NO_ERROR); + executeDmlAfterAlter, expectedErrorOnInsert); LOG.info("Run SELECT txn after ALTER " + alterType + " cache set " + !cacheMetadataSetTrue); runDmlTxnWithAlterOnCurrentResource(Dml.SELECT, alterType, !cacheMetadataSetTrue, executeDmlAfterAlter, NO_ERROR); diff --git a/src/postgres/src/backend/commands/ybccmds.c b/src/postgres/src/backend/commands/ybccmds.c index 
26ca0f6b5a50..858e11ba87b7 100644 --- a/src/postgres/src/backend/commands/ybccmds.c +++ b/src/postgres/src/backend/commands/ybccmds.c @@ -1211,6 +1211,7 @@ YBCPrepareAlterTableCmd(AlterTableCmd* cmd, Relation rel, List *handles, case AT_DropColumnRecurse: case AT_AddConstraintRecurse: case AT_DropConstraintRecurse: + case AT_ValidateConstraintRecurse: break; default: /* @@ -1382,6 +1383,8 @@ YBCPrepareAlterTableCmd(AlterTableCmd* cmd, Relation rel, List *handles, case AT_AttachPartition: case AT_DetachPartition: case AT_SetTableSpace: + case AT_ValidateConstraint: + case AT_ValidateConstraintRecurse: { Assert(cmd->subtype != AT_DropConstraint); if (cmd->subtype == AT_AlterColumnType) diff --git a/src/postgres/src/backend/parser/gram.y b/src/postgres/src/backend/parser/gram.y index b037f3f32afc..76ed6e9aec34 100644 --- a/src/postgres/src/backend/parser/gram.y +++ b/src/postgres/src/backend/parser/gram.y @@ -2443,7 +2443,6 @@ alter_table_cmd: /* ALTER TABLE VALIDATE CONSTRAINT ... */ | VALIDATE CONSTRAINT name { - parser_ybc_signal_unsupported(@1, "ALTER TABLE VALIDATE CONSTRAINT", 1124); AlterTableCmd *n = makeNode(AlterTableCmd); n->subtype = AT_ValidateConstraint; n->name = $3; diff --git a/src/postgres/src/test/regress/expected/yb_alter_table.out b/src/postgres/src/test/regress/expected/yb_alter_table.out index 5985b0ec6377..2adf4ff135b8 100644 --- a/src/postgres/src/test/regress/expected/yb_alter_table.out +++ b/src/postgres/src/test/regress/expected/yb_alter_table.out @@ -449,3 +449,47 @@ ALTER TABLE test_dropcolumn ADD COLUMN d test_dropcolumn_type; DROP TYPE test_dropcolumn_type CASCADE; -- should drop the column d NOTICE: drop cascades to column d of table test_dropcolumn ALTER TABLE test_dropcolumn ADD COLUMN d int; +-- +-- Test for ALTER TABLE ... 
VALIDATE CONSTRAINT +-- +-- check constraints +CREATE TABLE test_validate_constraint(a int, b int); +INSERT INTO test_validate_constraint VALUES (1, 1), (2, 2), (3, 3), (-1, -1); +ALTER TABLE test_validate_constraint ADD CONSTRAINT test_validate_constraint_check CHECK (a > 0) NOT VALID; +ALTER TABLE test_validate_constraint VALIDATE CONSTRAINT test_validate_constraint_check; -- should fail. +ERROR: check constraint "test_validate_constraint_check" is violated by some row +DELETE FROM test_validate_constraint WHERE a = -1; +ALTER TABLE test_validate_constraint VALIDATE CONSTRAINT test_validate_constraint_check; +-- foreign key constraints +CREATE TABLE test_validate_constraint_parent(a int PRIMARY KEY); +CREATE TABLE test_validate_constraint_child(b int); +INSERT INTO test_validate_constraint_parent VALUES (1), (2); +INSERT INTO test_validate_constraint_child VALUES (1), (2), (3); +ALTER TABLE test_validate_constraint_child ADD CONSTRAINT test_validate_constraint_child_fk + FOREIGN KEY (b) REFERENCES test_validate_constraint_parent(a) NOT VALID; +ALTER TABLE test_validate_constraint_child + VALIDATE CONSTRAINT test_validate_constraint_child_fk; -- should fail. +ERROR: insert or update on table "test_validate_constraint_child" violates foreign key constraint "test_validate_constraint_child_fk" +DETAIL: Key (b)=(3) is not present in table "test_validate_constraint_parent". 
+INSERT INTO test_validate_constraint_parent VALUES (3); +ALTER TABLE test_validate_constraint_child + VALIDATE CONSTRAINT test_validate_constraint_child_fk; +-- partitioned tables +CREATE TABLE test_validate_constraint_part (a int) PARTITION BY RANGE (a); +CREATE TABLE test_validate_constraint_part_1 + PARTITION OF test_validate_constraint_part FOR VALUES FROM (1) TO (6); +CREATE TABLE test_validate_constraint_part_2 + PARTITION OF test_validate_constraint_part FOR VALUES FROM (6) TO (11); +INSERT INTO test_validate_constraint_part VALUES (generate_series(1, 10)); +ALTER TABLE test_validate_constraint_part + ADD CONSTRAINT test_validate_constraint_part_check CHECK (a % 2 = 0) NOT VALID; +ALTER TABLE test_validate_constraint_part + VALIDATE CONSTRAINT test_validate_constraint_part_check; -- should fail. +ERROR: check constraint "test_validate_constraint_part_check" is violated by some row +DELETE FROM test_validate_constraint_part WHERE a % 2 = 1 AND a < 6; +ALTER TABLE test_validate_constraint_part + VALIDATE CONSTRAINT test_validate_constraint_part_check; -- should still fail. 
+ERROR: check constraint "test_validate_constraint_part_check" is violated by some row +DELETE FROM test_validate_constraint_part WHERE a % 2 = 1; +ALTER TABLE test_validate_constraint_part + VALIDATE CONSTRAINT test_validate_constraint_part_check; diff --git a/src/postgres/src/test/regress/expected/yb_pg_alter_table.out b/src/postgres/src/test/regress/expected/yb_pg_alter_table.out index a87bc1bc5c8e..de820d2b23c6 100644 --- a/src/postgres/src/test/regress/expected/yb_pg_alter_table.out +++ b/src/postgres/src/test/regress/expected/yb_pg_alter_table.out @@ -292,6 +292,55 @@ Check constraints: DROP TABLE constraint_rename_cache; DROP TABLE like_constraint_rename_cache; +-- FOREIGN KEY CONSTRAINT adding TEST +CREATE TABLE attmp2 (a int primary key); +CREATE TABLE attmp3 (a int, b int); +CREATE TABLE attmp4 (a int, b int, unique(a,b)); +CREATE TABLE attmp5 (a int, b int); +-- Insert rows into attmp2 (pktable) +INSERT INTO attmp2 values (1); +INSERT INTO attmp2 values (2); +INSERT INTO attmp2 values (3); +INSERT INTO attmp2 values (4); +-- Insert rows into attmp3 +INSERT INTO attmp3 values (1,10); +INSERT INTO attmp3 values (1,20); +INSERT INTO attmp3 values (5,50); +-- Try (and fail) to add constraint due to invalid source columns +ALTER TABLE attmp3 add constraint attmpconstr foreign key(c) references attmp2 match full; +ERROR: column "c" referenced in foreign key constraint does not exist +-- Try (and fail) to add constraint due to invalid destination columns explicitly given +ALTER TABLE attmp3 add constraint attmpconstr foreign key(a) references attmp2(b) match full; +ERROR: column "b" referenced in foreign key constraint does not exist +-- Try (and fail) to add constraint due to invalid data +ALTER TABLE attmp3 add constraint attmpconstr foreign key (a) references attmp2 match full; +ERROR: insert or update on table "attmp3" violates foreign key constraint "attmpconstr" +DETAIL: Key (a)=(5) is not present in table "attmp2". 
+-- Delete failing row +DELETE FROM attmp3 where a=5; +-- Try (and succeed) +ALTER TABLE attmp3 add constraint attmpconstr foreign key (a) references attmp2 match full; +ALTER TABLE attmp3 drop constraint attmpconstr; +INSERT INTO attmp3 values (5,50); +-- Try NOT VALID and then VALIDATE CONSTRAINT, but fails. Delete failure then re-validate +ALTER TABLE attmp3 add constraint attmpconstr foreign key (a) references attmp2 match full NOT VALID; +ALTER TABLE attmp3 validate constraint attmpconstr; +ERROR: insert or update on table "attmp3" violates foreign key constraint "attmpconstr" +DETAIL: Key (a)=(5) is not present in table "attmp2". +-- Delete failing row +DELETE FROM attmp3 where a=5; +-- Try (and succeed) and repeat to show it works on already valid constraint +ALTER TABLE attmp3 validate constraint attmpconstr; +ALTER TABLE attmp3 validate constraint attmpconstr; +-- Try a non-verified CHECK constraint +ALTER TABLE attmp3 ADD CONSTRAINT b_greater_than_ten CHECK (b > 10); -- fail +ERROR: check constraint "b_greater_than_ten" is violated by some row +ALTER TABLE attmp3 ADD CONSTRAINT b_greater_than_ten CHECK (b > 10) NOT VALID; -- succeeds +ALTER TABLE attmp3 VALIDATE CONSTRAINT b_greater_than_ten; -- fails +ERROR: check constraint "b_greater_than_ten" is violated by some row +DELETE FROM attmp3 WHERE NOT b > 10; +ALTER TABLE attmp3 VALIDATE CONSTRAINT b_greater_than_ten; -- succeeds +ALTER TABLE attmp3 VALIDATE CONSTRAINT b_greater_than_ten; -- succeeds -- test inheritance create table renameColumn (a int); create table renameColumnChild (b int) inherits (renameColumn); @@ -1085,10 +1134,6 @@ create table parted_validate_test (a int) partition by list (a); create table parted_validate_test_1 partition of parted_validate_test for values in (0, 1); alter table parted_validate_test add constraint parted_validate_test_chka check (a > 0) not valid; alter table parted_validate_test validate constraint parted_validate_test_chka; -ERROR: ALTER TABLE VALIDATE 
CONSTRAINT not supported yet -LINE 1: alter table parted_validate_test validate constraint parted_... - ^ -HINT: See https://github.com/yugabyte/yugabyte-db/issues/1124. React with thumbs up to raise its priority drop table parted_validate_test; -- test alter column options CREATE TABLE attmp(i integer); diff --git a/src/postgres/src/test/regress/expected/yb_pg_foreign_data.out b/src/postgres/src/test/regress/expected/yb_pg_foreign_data.out index b53584ed8192..cf750ccc83f7 100644 --- a/src/postgres/src/test/regress/expected/yb_pg_foreign_data.out +++ b/src/postgres/src/test/regress/expected/yb_pg_foreign_data.out @@ -1814,10 +1814,6 @@ FDW options: (delimiter ',', quote '"', "be quoted" 'value') -- -- VALIDATE CONSTRAINT need do nothing on foreign tables ALTER TABLE fd_pt1 VALIDATE CONSTRAINT fd_pt1chk3; -ERROR: ALTER TABLE VALIDATE CONSTRAINT not supported yet -LINE 1: ALTER TABLE fd_pt1 VALIDATE CONSTRAINT fd_pt1chk3; - ^ -HINT: See https://github.com/yugabyte/yugabyte-db/issues/1124. React with thumbs up to raise its priority \d+ fd_pt1 Table "public.fd_pt1" Column | Type | Collation | Nullable | Default | Storage | Stats target | Description @@ -1826,7 +1822,7 @@ HINT: See https://github.com/yugabyte/yugabyte-db/issues/1124. React with thumb c2 | text | | | | extended | | c3 | date | | | | plain | | Check constraints: - "fd_pt1chk3" CHECK (c2 <> ''::text) NOT VALID + "fd_pt1chk3" CHECK (c2 <> ''::text) -- ^^^ originally extra: -- Child tables: ft2 @@ -1862,7 +1858,7 @@ HINT: See https://github.com/yugabyte/yugabyte-db/issues/1124. 
React with thumb c2 | text | | | | extended | | c3 | date | | | | plain | | Check constraints: - "fd_pt1chk3" CHECK (c2 <> ''::text) NOT VALID + "fd_pt1chk3" CHECK (c2 <> ''::text) -- ^^^ originally extra: -- Child tables: ft2 @@ -1897,7 +1893,7 @@ ALTER TABLE fd_pt1 SET WITHOUT OIDS; c2 | text | | | | extended | | c3 | date | | | | plain | | Check constraints: - "fd_pt1chk3" CHECK (c2 <> ''::text) NOT VALID + "fd_pt1chk3" CHECK (c2 <> ''::text) -- ^^^ originally extra: -- Child tables: ft2 @@ -1933,7 +1929,7 @@ ALTER TABLE fd_pt1 RENAME CONSTRAINT fd_pt1chk3 TO f2_check; f2 | text | | | | extended | | f3 | date | | | | plain | | Check constraints: - "f2_check" CHECK (f2 <> ''::text) NOT VALID + "f2_check" CHECK (f2 <> ''::text) -- ^^^ originally extra: -- Child tables: ft2 diff --git a/src/postgres/src/test/regress/sql/yb_alter_table.sql b/src/postgres/src/test/regress/sql/yb_alter_table.sql index 5329e421d23c..6c1806e7f125 100644 --- a/src/postgres/src/test/regress/sql/yb_alter_table.sql +++ b/src/postgres/src/test/regress/sql/yb_alter_table.sql @@ -182,3 +182,43 @@ CREATE TYPE test_dropcolumn_type AS (a int, b int); ALTER TABLE test_dropcolumn ADD COLUMN d test_dropcolumn_type; DROP TYPE test_dropcolumn_type CASCADE; -- should drop the column d ALTER TABLE test_dropcolumn ADD COLUMN d int; + +-- +-- Test for ALTER TABLE ... VALIDATE CONSTRAINT +-- +-- check constraints +CREATE TABLE test_validate_constraint(a int, b int); +INSERT INTO test_validate_constraint VALUES (1, 1), (2, 2), (3, 3), (-1, -1); +ALTER TABLE test_validate_constraint ADD CONSTRAINT test_validate_constraint_check CHECK (a > 0) NOT VALID; +ALTER TABLE test_validate_constraint VALIDATE CONSTRAINT test_validate_constraint_check; -- should fail. 
+DELETE FROM test_validate_constraint WHERE a = -1; +ALTER TABLE test_validate_constraint VALIDATE CONSTRAINT test_validate_constraint_check; +-- foreign key constraints +CREATE TABLE test_validate_constraint_parent(a int PRIMARY KEY); +CREATE TABLE test_validate_constraint_child(b int); +INSERT INTO test_validate_constraint_parent VALUES (1), (2); +INSERT INTO test_validate_constraint_child VALUES (1), (2), (3); +ALTER TABLE test_validate_constraint_child ADD CONSTRAINT test_validate_constraint_child_fk + FOREIGN KEY (b) REFERENCES test_validate_constraint_parent(a) NOT VALID; +ALTER TABLE test_validate_constraint_child + VALIDATE CONSTRAINT test_validate_constraint_child_fk; -- should fail. +INSERT INTO test_validate_constraint_parent VALUES (3); +ALTER TABLE test_validate_constraint_child + VALIDATE CONSTRAINT test_validate_constraint_child_fk; +-- partitioned tables +CREATE TABLE test_validate_constraint_part (a int) PARTITION BY RANGE (a); +CREATE TABLE test_validate_constraint_part_1 + PARTITION OF test_validate_constraint_part FOR VALUES FROM (1) TO (6); +CREATE TABLE test_validate_constraint_part_2 + PARTITION OF test_validate_constraint_part FOR VALUES FROM (6) TO (11); +INSERT INTO test_validate_constraint_part VALUES (generate_series(1, 10)); +ALTER TABLE test_validate_constraint_part + ADD CONSTRAINT test_validate_constraint_part_check CHECK (a % 2 = 0) NOT VALID; +ALTER TABLE test_validate_constraint_part + VALIDATE CONSTRAINT test_validate_constraint_part_check; -- should fail. +DELETE FROM test_validate_constraint_part WHERE a % 2 = 1 AND a < 6; +ALTER TABLE test_validate_constraint_part + VALIDATE CONSTRAINT test_validate_constraint_part_check; -- should still fail. 
+DELETE FROM test_validate_constraint_part WHERE a % 2 = 1; +ALTER TABLE test_validate_constraint_part + VALIDATE CONSTRAINT test_validate_constraint_part_check; diff --git a/src/postgres/src/test/regress/sql/yb_pg_alter_table.sql b/src/postgres/src/test/regress/sql/yb_pg_alter_table.sql index 8772251e3409..5a630b01732d 100644 --- a/src/postgres/src/test/regress/sql/yb_pg_alter_table.sql +++ b/src/postgres/src/test/regress/sql/yb_pg_alter_table.sql @@ -297,6 +297,64 @@ CREATE TABLE like_constraint_rename_cache DROP TABLE constraint_rename_cache; DROP TABLE like_constraint_rename_cache; +-- FOREIGN KEY CONSTRAINT adding TEST + +CREATE TABLE attmp2 (a int primary key); + +CREATE TABLE attmp3 (a int, b int); + +CREATE TABLE attmp4 (a int, b int, unique(a,b)); + +CREATE TABLE attmp5 (a int, b int); + +-- Insert rows into attmp2 (pktable) +INSERT INTO attmp2 values (1); +INSERT INTO attmp2 values (2); +INSERT INTO attmp2 values (3); +INSERT INTO attmp2 values (4); + +-- Insert rows into attmp3 +INSERT INTO attmp3 values (1,10); +INSERT INTO attmp3 values (1,20); +INSERT INTO attmp3 values (5,50); + +-- Try (and fail) to add constraint due to invalid source columns +ALTER TABLE attmp3 add constraint attmpconstr foreign key(c) references attmp2 match full; + +-- Try (and fail) to add constraint due to invalid destination columns explicitly given +ALTER TABLE attmp3 add constraint attmpconstr foreign key(a) references attmp2(b) match full; + +-- Try (and fail) to add constraint due to invalid data +ALTER TABLE attmp3 add constraint attmpconstr foreign key (a) references attmp2 match full; + +-- Delete failing row +DELETE FROM attmp3 where a=5; + +-- Try (and succeed) +ALTER TABLE attmp3 add constraint attmpconstr foreign key (a) references attmp2 match full; +ALTER TABLE attmp3 drop constraint attmpconstr; + +INSERT INTO attmp3 values (5,50); + +-- Try NOT VALID and then VALIDATE CONSTRAINT, but fails. 
Delete failure then re-validate +ALTER TABLE attmp3 add constraint attmpconstr foreign key (a) references attmp2 match full NOT VALID; +ALTER TABLE attmp3 validate constraint attmpconstr; + +-- Delete failing row +DELETE FROM attmp3 where a=5; + +-- Try (and succeed) and repeat to show it works on already valid constraint +ALTER TABLE attmp3 validate constraint attmpconstr; +ALTER TABLE attmp3 validate constraint attmpconstr; + +-- Try a non-verified CHECK constraint +ALTER TABLE attmp3 ADD CONSTRAINT b_greater_than_ten CHECK (b > 10); -- fail +ALTER TABLE attmp3 ADD CONSTRAINT b_greater_than_ten CHECK (b > 10) NOT VALID; -- succeeds +ALTER TABLE attmp3 VALIDATE CONSTRAINT b_greater_than_ten; -- fails +DELETE FROM attmp3 WHERE NOT b > 10; +ALTER TABLE attmp3 VALIDATE CONSTRAINT b_greater_than_ten; -- succeeds +ALTER TABLE attmp3 VALIDATE CONSTRAINT b_greater_than_ten; -- succeeds + -- test inheritance create table renameColumn (a int); diff --git a/src/yb/yql/pgwrapper/pg_fkey-test.cc b/src/yb/yql/pgwrapper/pg_fkey-test.cc index 0f8b6bfeaf34..f04af6ddae24 100644 --- a/src/yb/yql/pgwrapper/pg_fkey-test.cc +++ b/src/yb/yql/pgwrapper/pg_fkey-test.cc @@ -332,17 +332,11 @@ TEST_F(PgFKeyTest, [&conn] { return AddFKConstraint(&conn, true /* skip_check */); })).read; ASSERT_EQ(add_fk_rpc_count, 0); - /* Note: VALIDATE CONSTRAINT is not yet supported. 
Uncomment next lines after fixing of #3946 const auto validate_fk_rpc_count = ASSERT_RESULT(rpc_count_->Delta( - [&conn] { return conn.Execute("ALTER TABLE child VALIDATE CONSTRAINT child2parent"); })).read; + [&conn] { return conn.ExecuteFormat( + "ALTER TABLE $0 VALIDATE CONSTRAINT $1", kFKTable, kConstraintName); })).read; - ASSERT_EQ(validate_fk_rpc_count, 2);*/ - - // Check that VALIDATE CONSTRAINT is not supported - ASSERT_STR_CONTAINS( - conn.ExecuteFormat( - "ALTER TABLE $0 VALIDATE CONSTRAINT $1", kFKTable, kConstraintName).ToString(), - "not supported yet"); + ASSERT_EQ(validate_fk_rpc_count, 2); } // Test checks FK correctness in case of FK check requires type casting. From 2ac2e98266668690fc7adb1f43321731c351e160 Mon Sep 17 00:00:00 2001 From: Fizaa Luthra Date: Wed, 4 Sep 2024 10:07:34 -0400 Subject: [PATCH 10/75] [#23118] YSQL: Add support for ADD PRIMARY KEY ... USING INDEX Summary: This diff adds support for adding a primary key using a unique index by performing table rewrite. Code changes: - ATExecAddIndexConstraint in tablecmds.c: - If we are adding a primary key constraint on a YB relation, mark the table to be rewritten by alter table phase 3. - Skip copying split options (number of tablets) when we are adding a range based primary key. - Since primary keys are implicit to the DocDB table, drop the DocDB table associated with the secondary unique index. - pg_yb_utils.c: - Add function `YbGetIndexKeySortOrdering` to retrieve the sort ordering of the first key element of an index. - YbATIsRangePk: minor change to make the function take the sort ordering instead as a parameter instead of the index statement. Note: This operation is not supported when the unique index has key columns in descending order -- this limitation exists in Postgres, as Postgres does not allow adding primary keys with `DESC` ordering. Although we can support this in YB, it is omitted for now as the motivation for this feature is to facilitate migrations. 
Jira: DB-12051 Test Plan: new tests in `yb_alter_table_rewrite`, `yb_pg_create_index`, `yb_pg_index_including` ./yb_build.sh --java-test 'org.yb.pgsql.TestPgRegressTable' ./yb_build.sh --java-test 'org.yb.pgsql.TestPgRegressPgMisc' ./yb_build.sh --java-test 'org.yb.pgsql.TestPgRegressPgMiscIndependent' Reviewers: myang Reviewed By: myang Subscribers: smishra, yql Differential Revision: https://phorge.dev.yugabyte.com/D37741 --- src/postgres/src/backend/commands/tablecmds.c | 54 +++++++++---- .../src/backend/utils/misc/pg_yb_utils.c | 16 ++++ src/postgres/src/include/pg_yb_utils.h | 2 + .../expected/yb_alter_table_rewrite.out | 77 ++++++++++++++++++- .../regress/expected/yb_pg_create_index.out | 65 ++++++++++++++++ .../expected/yb_pg_index_including.out | 25 +++--- .../regress/sql/yb_alter_table_rewrite.sql | 29 ++++++- .../test/regress/sql/yb_pg_create_index.sql | 34 ++++++++ .../regress/sql/yb_pg_index_including.sql | 17 ++-- 9 files changed, 278 insertions(+), 41 deletions(-) diff --git a/src/postgres/src/backend/commands/tablecmds.c b/src/postgres/src/backend/commands/tablecmds.c index 6bdb116fedba..990c434ee174 100644 --- a/src/postgres/src/backend/commands/tablecmds.c +++ b/src/postgres/src/backend/commands/tablecmds.c @@ -444,7 +444,7 @@ static ObjectAddress ATExecAddConstraint(List **wqueue, Constraint *newConstraint, bool recurse, bool is_readd, LOCKMODE lockmode); static ObjectAddress ATExecAddIndexConstraint(AlteredTableInfo *tab, Relation rel, - IndexStmt *stmt, LOCKMODE lockmode); + IndexStmt *stmt, LOCKMODE lockmode, List **yb_wqueue); static ObjectAddress ATAddCheckConstraint(List **wqueue, AlteredTableInfo *tab, Relation rel, Constraint *constr, @@ -551,8 +551,8 @@ static Relation YbATCloneRelationSetColumnType(Relation old_rel, Oid altered_collation_id, TypeName *altered_type_name, List *new_column_values); -static bool YbATIsRangePk(IndexStmt *stmt, - bool is_colocated, bool is_tablegroup); +static bool YbATIsRangePk(SortByDir ordering, bool 
is_colocated, + bool is_tablegroup); static void YbATSetPKRewriteChildPartitions(List **yb_wqueue, AlteredTableInfo *tab, bool skip_copy_split_options); @@ -4631,7 +4631,7 @@ ATExecCmd(List **wqueue, AlteredTableInfo *tab, Relation *mutable_rel, break; case AT_AddIndexConstraint: /* ADD CONSTRAINT USING INDEX */ address = ATExecAddIndexConstraint(tab, rel, (IndexStmt *) cmd->def, - lockmode); + lockmode, wqueue); break; case AT_AlterConstraint: /* ALTER CONSTRAINT */ address = ATExecAlterConstraint(rel, cmd, false, false, lockmode); @@ -7639,7 +7639,8 @@ ATExecAddIndex(List **yb_wqueue, AlteredTableInfo *tab, Relation *mutable_rel, { YbGetTableProperties(*mutable_rel); /* Don't copy split options if we are creating a range key. */ - bool skip_copy_split_options = YbATIsRangePk(stmt, + bool skip_copy_split_options = YbATIsRangePk( + linitial_node(IndexElem, stmt->indexParams)->ordering, (*mutable_rel)->yb_table_properties->is_colocated, OidIsValid( (*mutable_rel)->yb_table_properties->tablegroup_oid)); if (!skip_build) @@ -7683,7 +7684,8 @@ ATExecAddIndex(List **yb_wqueue, AlteredTableInfo *tab, Relation *mutable_rel, */ static ObjectAddress ATExecAddIndexConstraint(AlteredTableInfo *tab, Relation rel, - IndexStmt *stmt, LOCKMODE lockmode) + IndexStmt *stmt, LOCKMODE lockmode, + List **yb_wqueue) { Oid index_oid = stmt->indexOid; Relation indexRel; @@ -7707,11 +7709,6 @@ ATExecAddIndexConstraint(AlteredTableInfo *tab, Relation rel, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("ALTER TABLE / ADD CONSTRAINT USING INDEX is not supported on partitioned tables"))); - if (IsYugaByteEnabled() && stmt->primary) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("ALTER TABLE / ADD CONSTRAINT PRIMARY KEY USING INDEX is not supported"))); - indexRel = index_open(index_oid, AccessShareLock); indexName = pstrdup(RelationGetRelationName(indexRel)); @@ -7722,6 +7719,29 @@ ATExecAddIndexConstraint(AlteredTableInfo *tab, Relation rel, if (!indexInfo->ii_Unique) 
elog(ERROR, "index \"%s\" is not unique", indexName); + /* + * YB: Adding a primary key requires table rewrite. + * We do not need to rewrite any children as this operation is not supported + * on partitioned tables (checked above). + */ + if (IsYBRelation(rel) && stmt->primary) + { + YbGetTableProperties(rel); + /* Don't copy split options if we are creating a range key. */ + bool skip_copy_split_options = YbATIsRangePk( + YbGetIndexKeySortOrdering(indexRel), + rel->yb_table_properties->is_colocated, OidIsValid( + rel->yb_table_properties->tablegroup_oid)); + tab->rewrite |= YB_AT_REWRITE_ALTER_PRIMARY_KEY; + tab->yb_skip_copy_split_options = tab->yb_skip_copy_split_options + || skip_copy_split_options; + /* + * Since this index is going to be upgraded to a pkey, we can drop the + * DocDB table associated with the secondary index. + */ + YBCDropIndex(indexRel); + } + /* * Determine name to assign to constraint. We require a constraint to * have the same name as the underlying index; therefore, use the index's @@ -16249,7 +16269,8 @@ AttachPartitionEnsureIndexes(Relation rel, Relation attachrel, List **yb_wqueue) tab->rewrite = YB_AT_REWRITE_ALTER_PRIMARY_KEY; YbGetTableProperties(attachrel); /* Don't copy split options if we are creating a range key. 
*/ - bool skip_copy_split_options = YbATIsRangePk(stmt, + bool skip_copy_split_options = YbATIsRangePk( + linitial_node(IndexElem, stmt->indexParams)->ordering, attachrel->yb_table_properties->is_colocated, OidIsValid( attachrel->yb_table_properties->tablegroup_oid)); @@ -17596,11 +17617,11 @@ YbATGetRenameStmt(const char *namespace_name, const char *current_name, } static bool -YbATIsRangePk(IndexStmt *stmt, bool is_colocated, bool is_tablegroup) +YbATIsRangePk(SortByDir ordering, bool is_colocated, bool is_tablegroup) { SortByDir yb_ordering = - YbSortOrdering(linitial_node(IndexElem, stmt->indexParams)->ordering, - is_colocated, is_tablegroup, true /* is_first_key */); + YbSortOrdering(ordering, is_colocated, is_tablegroup, + true /* is_first_key */); if (yb_ordering == SORTBY_ASC || yb_ordering == SORTBY_DESC) return true; @@ -18924,7 +18945,8 @@ YbATCloneRelationSetPrimaryKey(Relation old_rel, IndexStmt *stmt, if (stmt) is_range_pk = YbATIsRangePk( - stmt, old_rel->yb_table_properties->is_colocated, + linitial_node(IndexElem, stmt->indexParams)->ordering, + old_rel->yb_table_properties->is_colocated, OidIsValid(old_rel->yb_table_properties->tablegroup_oid)); /* diff --git a/src/postgres/src/backend/utils/misc/pg_yb_utils.c b/src/postgres/src/backend/utils/misc/pg_yb_utils.c index da8c9f8ff978..b3547855cebb 100644 --- a/src/postgres/src/backend/utils/misc/pg_yb_utils.c +++ b/src/postgres/src/backend/utils/misc/pg_yb_utils.c @@ -5012,3 +5012,19 @@ bool YbIsAttrPrimaryKeyColumn(Relation rel, AttrNumber attnum) return bms_is_member(attnum - YBGetFirstLowInvalidAttributeNumber(rel), pkey); } + +/* Retrieve the sort ordering of the first key element of an index. */ +SortByDir YbGetIndexKeySortOrdering(Relation indexRel) +{ + if (IndexRelationGetNumberOfKeyAttributes(indexRel) == 0) + return SORTBY_DEFAULT; + /* + * If there are key columns, check the indoption of the first + * key attribute. 
+ */ + if (indexRel->rd_indoption[0] & INDOPTION_HASH) + return SORTBY_HASH; + if (indexRel->rd_indoption[0] & INDOPTION_DESC) + return SORTBY_DESC; + return SORTBY_ASC; +} diff --git a/src/postgres/src/include/pg_yb_utils.h b/src/postgres/src/include/pg_yb_utils.h index c48148e68527..c7d53142f5da 100644 --- a/src/postgres/src/include/pg_yb_utils.h +++ b/src/postgres/src/include/pg_yb_utils.h @@ -1177,4 +1177,6 @@ extern bool YbIsYsqlConnMgrWarmupModeEnabled(); bool YbIsAttrPrimaryKeyColumn(Relation rel, AttrNumber attnum); +SortByDir YbGetIndexKeySortOrdering(Relation indexRel); + #endif /* PG_YB_UTILS_H */ diff --git a/src/postgres/src/test/regress/expected/yb_alter_table_rewrite.out b/src/postgres/src/test/regress/expected/yb_alter_table_rewrite.out index fb0ac6d574b4..21cd911221a1 100644 --- a/src/postgres/src/test/regress/expected/yb_alter_table_rewrite.out +++ b/src/postgres/src/test/regress/expected/yb_alter_table_rewrite.out @@ -293,6 +293,21 @@ SELECT num_tablets, num_hash_key_columns, is_colocated FROM 1 | 0 | t (1 row) +CREATE UNIQUE INDEX base_idx_unique ON base(col); +ALTER TABLE base ADD PRIMARY KEY USING INDEX base_idx_unique; +INSERT INTO base VALUES (1, 1); -- should fail. +ERROR: duplicate key value violates unique constraint "base_idx_unique" +INSERT INTO base VALUES (4, 4), (5, 5); +SELECT col, col2 FROM base; + col | col2 +-----+------ + 1 | 3 + 2 | 2 + 3 | 1 + 4 | 4 + 5 | 5 +(5 rows) + CREATE TABLE base2 (col int, col2 int) WITH (COLOCATION=false); CREATE INDEX base2_idx ON base2(col2); INSERT INTO base2 VALUES (1, 3), (2, 2), (3, 1); @@ -592,10 +607,66 @@ CREATE TABLE nopk_udt (id typeid, v int); ALTER TABLE nopk_udt ADD PRIMARY KEY (id); -- should fail. ERROR: PRIMARY KEY containing column of type 'user_defined_type' not yet supported -- test pk USING INDEX. 
-CREATE TABLE nopk_usingindex (id int); -CREATE UNIQUE INDEX nopk_idx ON nopk_usingindex (id ASC); +CREATE TABLE nopk_usingindex (id int) SPLIT INTO 5 TABLETS; +INSERT INTO nopk_usingindex VALUES (1), (2), (3); +CREATE INDEX nopk_idx ON nopk_usingindex(id); +CREATE UNIQUE INDEX nopk_idx2 ON nopk_usingindex (id HASH); +CREATE UNIQUE INDEX nopk_idx3 ON nopk_usingindex (id ASC); +CREATE UNIQUE INDEX nopk_idx4 ON nopk_usingindex (id DESC); ALTER TABLE nopk_usingindex ADD PRIMARY KEY USING INDEX nopk_idx; -- should fail. -ERROR: ALTER TABLE / ADD CONSTRAINT PRIMARY KEY USING INDEX is not supported +ERROR: "nopk_idx" is not a unique index +LINE 1: ALTER TABLE nopk_usingindex ADD PRIMARY KEY USING INDEX nopk... + ^ +DETAIL: Cannot create a primary key or unique constraint using such an index. +INSERT INTO nopk_usingindex VALUES (null); +ALTER TABLE nopk_usingindex ADD PRIMARY KEY USING INDEX nopk_idx2; -- should fail. +ERROR: column "id" contains null values +DELETE FROM nopk_usingindex WHERE id IS NULL; +ALTER TABLE nopk_usingindex ADD PRIMARY KEY USING INDEX nopk_idx2; +SELECT num_tablets, num_hash_key_columns FROM yb_table_properties('nopk_usingindex'::regclass); + num_tablets | num_hash_key_columns +-------------+---------------------- + 5 | 1 +(1 row) + +INSERT INTO nopk_usingindex VALUES (4); +INSERT INTO nopk_usingindex VALUES (1); -- should fail. +ERROR: duplicate key value violates unique constraint "nopk_idx2" +INSERT INTO nopk_usingindex VALUES (null); -- should fail. +ERROR: null value in column "id" violates not-null constraint +DETAIL: Failing row contains (null). +SELECT * FROM nopk_usingindex ORDER BY id; + id +---- + 1 + 2 + 3 + 4 +(4 rows) + +DROP INDEX nopk_idx2; -- should fail. +ERROR: cannot drop index nopk_idx2 because constraint nopk_idx2 on table nopk_usingindex requires it +HINT: You can drop constraint nopk_idx2 on table nopk_usingindex instead. 
+ALTER TABLE nopk_usingindex DROP CONSTRAINT nopk_idx2; +ALTER TABLE nopk_usingindex ADD PRIMARY KEY USING INDEX nopk_idx3; +SELECT * FROM nopk_usingindex; + id +---- + 1 + 2 + 3 + 4 +(4 rows) + +DROP INDEX nopk_idx3; -- should fail. +ERROR: cannot drop index nopk_idx3 because constraint nopk_idx3 on table nopk_usingindex requires it +HINT: You can drop constraint nopk_idx3 on table nopk_usingindex instead. +ALTER TABLE nopk_usingindex DROP CONSTRAINT nopk_idx3; +ALTER TABLE nopk_usingindex ADD PRIMARY KEY USING INDEX nopk_idx4; -- should fail (not supported in PG). +ERROR: index "nopk_idx4" does not have default sorting behavior +LINE 1: ALTER TABLE nopk_usingindex ADD PRIMARY KEY USING INDEX nopk... + ^ +DETAIL: Cannot create a primary key or unique constraint using such an index. -- test adding/dropping pks on partitioned tables. CREATE TABLE nopk_whole (id int) PARTITION BY LIST (id); CREATE TABLE nopk_part1 PARTITION OF nopk_whole FOR VALUES IN (1, 2, 3); diff --git a/src/postgres/src/test/regress/expected/yb_pg_create_index.out b/src/postgres/src/test/regress/expected/yb_pg_create_index.out index 01b2c1889e4c..70426ef42992 100644 --- a/src/postgres/src/test/regress/expected/yb_pg_create_index.out +++ b/src/postgres/src/test/regress/expected/yb_pg_create_index.out @@ -152,6 +152,71 @@ LINE 1: REINDEX TABLE concur_heap; ^ HINT: Please report the issue on https://github.com/YugaByte/yugabyte-db/issues -- +-- Test ADD CONSTRAINT USING INDEX +-- +CREATE TABLE cwi_test( a int , b varchar(10), c char); +-- add some data so that all tests have something to work with. +INSERT INTO cwi_test VALUES(1, 2), (3, 4), (5, 6); +CREATE UNIQUE INDEX cwi_uniq_idx ON cwi_test(a , b); +ALTER TABLE cwi_test ADD primary key USING INDEX cwi_uniq_idx; +NOTICE: table rewrite may lead to inconsistencies +DETAIL: Concurrent DMLs may not be reflected in the new table. +HINT: See https://github.com/yugabyte/yugabyte-db/issues/19860. 
Set 'ysql_suppress_unsafe_alter_notice' yb-tserver gflag to true to suppress this notice. +\d cwi_test + Table "public.cwi_test" + Column | Type | Collation | Nullable | Default +--------+-----------------------+-----------+----------+--------- + a | integer | | not null | + b | character varying(10) | | not null | + c | character(1) | | | +Indexes: + "cwi_uniq_idx" PRIMARY KEY, lsm (a HASH, b ASC) + +\d cwi_uniq_idx + Index "public.cwi_uniq_idx" + Column | Type | Key? | Definition +--------+-----------------------+------+------------ + a | integer | yes | a + b | character varying(10) | yes | b +primary key, lsm, for table "public.cwi_test" + +CREATE UNIQUE INDEX cwi_uniq2_idx ON cwi_test(b , a); +ALTER TABLE cwi_test DROP CONSTRAINT cwi_uniq_idx, + ADD CONSTRAINT cwi_replaced_pkey PRIMARY KEY + USING INDEX cwi_uniq2_idx; +NOTICE: ALTER TABLE / ADD CONSTRAINT USING INDEX will rename index "cwi_uniq2_idx" to "cwi_replaced_pkey" +NOTICE: table rewrite may lead to inconsistencies +DETAIL: Concurrent DMLs may not be reflected in the new table. +HINT: See https://github.com/yugabyte/yugabyte-db/issues/19860. Set 'ysql_suppress_unsafe_alter_notice' yb-tserver gflag to true to suppress this notice. +\d cwi_test + Table "public.cwi_test" + Column | Type | Collation | Nullable | Default +--------+-----------------------+-----------+----------+--------- + a | integer | | not null | + b | character varying(10) | | not null | + c | character(1) | | | +Indexes: + "cwi_replaced_pkey" PRIMARY KEY, lsm (b HASH, a ASC) + +\d cwi_replaced_pkey + Index "public.cwi_replaced_pkey" + Column | Type | Key? 
| Definition +--------+-----------------------+------+------------ + b | character varying(10) | yes | b + a | integer | yes | a +primary key, lsm, for table "public.cwi_test" + +DROP INDEX cwi_replaced_pkey; -- Should fail; a constraint depends on it +ERROR: cannot drop index cwi_replaced_pkey because constraint cwi_replaced_pkey on table cwi_test requires it +HINT: You can drop constraint cwi_replaced_pkey on table cwi_test instead. +DROP TABLE cwi_test; +-- ADD CONSTRAINT USING INDEX is forbidden on partitioned tables +CREATE TABLE cwi_test(a int) PARTITION BY hash (a); +create unique index on cwi_test (a); +alter table cwi_test add primary key using index cwi_test_a_idx ; +ERROR: ALTER TABLE / ADD CONSTRAINT USING INDEX is not supported on partitioned tables +DROP TABLE cwi_test; +-- -- REINDEX (VERBOSE) -- CREATE TABLE reindex_verbose(id integer primary key); diff --git a/src/postgres/src/test/regress/expected/yb_pg_index_including.out b/src/postgres/src/test/regress/expected/yb_pg_index_including.out index 7c80591d65b6..aef86e25f95b 100644 --- a/src/postgres/src/test/regress/expected/yb_pg_index_including.out +++ b/src/postgres/src/test/regress/expected/yb_pg_index_including.out @@ -82,16 +82,21 @@ WHERE i.indrelid = 'tbl_include_pk'::regclass ORDER BY c.relname; CREATE UNIQUE INDEX tbl_include_pk_pkey ON public.tbl_include_pk USING lsm (c1 HASH, c2 ASC) INCLUDE (c3, c4) (1 row) --- NOT SUPPORTED --- --- CREATE TABLE tbl_include_c4 (c1 int, c2 int, c3 int, c4 int); --- INSERT INTO tbl_include_c4 SELECT 1, 2*x, 3*x, 4 FROM generate_series(1,10) AS x; --- CREATE UNIQUE INDEX tbl_include_c4_idx_unique ON tbl_include_c4 using lsm (c1, c2) INCLUDE (c3, c4); --- ALTER TABLE tbl_include_c4 add PRIMARY KEY USING INDEX tbl_include_c4_idx_unique; --- SELECT pg_get_indexdef(i.indexrelid) --- FROM pg_index i JOIN pg_class c ON i.indexrelid = c.oid --- WHERE i.indrelid = 'tbl_include_c4'::regclass ORDER BY c.relname; --- +CREATE TABLE tbl_include_c4 (c1 int, c2 int, c3 
int, c4 int); +INSERT INTO tbl_include_c4 SELECT 1, 2*x, 3*x, 4 FROM generate_series(1,10) AS x; +CREATE UNIQUE INDEX tbl_include_c4_idx_unique ON tbl_include_c4 using lsm (c1, c2) INCLUDE (c3, c4); +ALTER TABLE tbl_include_c4 add PRIMARY KEY USING INDEX tbl_include_c4_idx_unique; +NOTICE: table rewrite may lead to inconsistencies +DETAIL: Concurrent DMLs may not be reflected in the new table. +HINT: See https://github.com/yugabyte/yugabyte-db/issues/19860. Set 'ysql_suppress_unsafe_alter_notice' yb-tserver gflag to true to suppress this notice. +SELECT pg_get_indexdef(i.indexrelid) +FROM pg_index i JOIN pg_class c ON i.indexrelid = c.oid +WHERE i.indrelid = 'tbl_include_c4'::regclass ORDER BY c.relname; + pg_get_indexdef +--------------------------------------------------------------------------------------------------------------------- + CREATE UNIQUE INDEX tbl_include_c4_idx_unique ON public.tbl_include_c4 USING lsm (c1 HASH, c2 ASC) INCLUDE (c3, c4) +(1 row) + -- PK constraint. Must fail. CREATE TABLE tbl_include_c4_pk (c1 int, c2 int, c3 int, c4 int); INSERT INTO tbl_include_c4_pk SELECT 1, 2, 3*x, 4 FROM generate_series(1,10) AS x; diff --git a/src/postgres/src/test/regress/sql/yb_alter_table_rewrite.sql b/src/postgres/src/test/regress/sql/yb_alter_table_rewrite.sql index 5c5f083f6fd0..74a4df61d2e9 100644 --- a/src/postgres/src/test/regress/sql/yb_alter_table_rewrite.sql +++ b/src/postgres/src/test/regress/sql/yb_alter_table_rewrite.sql @@ -138,6 +138,11 @@ SELECT col, col2, col4 FROM base ORDER BY col; \d+ base; SELECT num_tablets, num_hash_key_columns, is_colocated FROM yb_table_properties('base_idx'::regclass); +CREATE UNIQUE INDEX base_idx_unique ON base(col); +ALTER TABLE base ADD PRIMARY KEY USING INDEX base_idx_unique; +INSERT INTO base VALUES (1, 1); -- should fail. 
+INSERT INTO base VALUES (4, 4), (5, 5); +SELECT col, col2 FROM base; CREATE TABLE base2 (col int, col2 int) WITH (COLOCATION=false); CREATE INDEX base2_idx ON base2(col2); INSERT INTO base2 VALUES (1, 3), (2, 2), (3, 1); @@ -246,9 +251,29 @@ CREATE TYPE typeid AS (i int); CREATE TABLE nopk_udt (id typeid, v int); ALTER TABLE nopk_udt ADD PRIMARY KEY (id); -- should fail. -- test pk USING INDEX. -CREATE TABLE nopk_usingindex (id int); -CREATE UNIQUE INDEX nopk_idx ON nopk_usingindex (id ASC); +CREATE TABLE nopk_usingindex (id int) SPLIT INTO 5 TABLETS; +INSERT INTO nopk_usingindex VALUES (1), (2), (3); +CREATE INDEX nopk_idx ON nopk_usingindex(id); +CREATE UNIQUE INDEX nopk_idx2 ON nopk_usingindex (id HASH); +CREATE UNIQUE INDEX nopk_idx3 ON nopk_usingindex (id ASC); +CREATE UNIQUE INDEX nopk_idx4 ON nopk_usingindex (id DESC); ALTER TABLE nopk_usingindex ADD PRIMARY KEY USING INDEX nopk_idx; -- should fail. +INSERT INTO nopk_usingindex VALUES (null); +ALTER TABLE nopk_usingindex ADD PRIMARY KEY USING INDEX nopk_idx2; -- should fail. +DELETE FROM nopk_usingindex WHERE id IS NULL; +ALTER TABLE nopk_usingindex ADD PRIMARY KEY USING INDEX nopk_idx2; +SELECT num_tablets, num_hash_key_columns FROM yb_table_properties('nopk_usingindex'::regclass); +INSERT INTO nopk_usingindex VALUES (4); +INSERT INTO nopk_usingindex VALUES (1); -- should fail. +INSERT INTO nopk_usingindex VALUES (null); -- should fail. +SELECT * FROM nopk_usingindex ORDER BY id; +DROP INDEX nopk_idx2; -- should fail. +ALTER TABLE nopk_usingindex DROP CONSTRAINT nopk_idx2; +ALTER TABLE nopk_usingindex ADD PRIMARY KEY USING INDEX nopk_idx3; +SELECT * FROM nopk_usingindex; +DROP INDEX nopk_idx3; -- should fail. +ALTER TABLE nopk_usingindex DROP CONSTRAINT nopk_idx3; +ALTER TABLE nopk_usingindex ADD PRIMARY KEY USING INDEX nopk_idx4; -- should fail (not supported in PG). -- test adding/dropping pks on partitioned tables. 
CREATE TABLE nopk_whole (id int) PARTITION BY LIST (id); CREATE TABLE nopk_part1 PARTITION OF nopk_whole FOR VALUES IN (1, 2, 3); diff --git a/src/postgres/src/test/regress/sql/yb_pg_create_index.sql b/src/postgres/src/test/regress/sql/yb_pg_create_index.sql index ef734d6627c4..56b798c9e3be 100644 --- a/src/postgres/src/test/regress/sql/yb_pg_create_index.sql +++ b/src/postgres/src/test/regress/sql/yb_pg_create_index.sql @@ -142,6 +142,40 @@ COMMIT; VACUUM FULL concur_heap; REINDEX TABLE concur_heap; +-- +-- Test ADD CONSTRAINT USING INDEX +-- + +CREATE TABLE cwi_test( a int , b varchar(10), c char); + +-- add some data so that all tests have something to work with. + +INSERT INTO cwi_test VALUES(1, 2), (3, 4), (5, 6); + +CREATE UNIQUE INDEX cwi_uniq_idx ON cwi_test(a , b); +ALTER TABLE cwi_test ADD primary key USING INDEX cwi_uniq_idx; + +\d cwi_test +\d cwi_uniq_idx + +CREATE UNIQUE INDEX cwi_uniq2_idx ON cwi_test(b , a); +ALTER TABLE cwi_test DROP CONSTRAINT cwi_uniq_idx, + ADD CONSTRAINT cwi_replaced_pkey PRIMARY KEY + USING INDEX cwi_uniq2_idx; + +\d cwi_test +\d cwi_replaced_pkey + +DROP INDEX cwi_replaced_pkey; -- Should fail; a constraint depends on it + +DROP TABLE cwi_test; + +-- ADD CONSTRAINT USING INDEX is forbidden on partitioned tables +CREATE TABLE cwi_test(a int) PARTITION BY hash (a); +create unique index on cwi_test (a); +alter table cwi_test add primary key using index cwi_test_a_idx ; +DROP TABLE cwi_test; + -- -- REINDEX (VERBOSE) -- diff --git a/src/postgres/src/test/regress/sql/yb_pg_index_including.sql b/src/postgres/src/test/regress/sql/yb_pg_index_including.sql index c8ca2013234f..c7844cc42588 100644 --- a/src/postgres/src/test/regress/sql/yb_pg_index_including.sql +++ b/src/postgres/src/test/regress/sql/yb_pg_index_including.sql @@ -50,16 +50,13 @@ SELECT pg_get_indexdef(i.indexrelid) FROM pg_index i JOIN pg_class c ON i.indexrelid = c.oid WHERE i.indrelid = 'tbl_include_pk'::regclass ORDER BY c.relname; --- NOT SUPPORTED --- --- CREATE 
TABLE tbl_include_c4 (c1 int, c2 int, c3 int, c4 int); --- INSERT INTO tbl_include_c4 SELECT 1, 2*x, 3*x, 4 FROM generate_series(1,10) AS x; --- CREATE UNIQUE INDEX tbl_include_c4_idx_unique ON tbl_include_c4 using lsm (c1, c2) INCLUDE (c3, c4); --- ALTER TABLE tbl_include_c4 add PRIMARY KEY USING INDEX tbl_include_c4_idx_unique; --- SELECT pg_get_indexdef(i.indexrelid) --- FROM pg_index i JOIN pg_class c ON i.indexrelid = c.oid --- WHERE i.indrelid = 'tbl_include_c4'::regclass ORDER BY c.relname; --- +CREATE TABLE tbl_include_c4 (c1 int, c2 int, c3 int, c4 int); +INSERT INTO tbl_include_c4 SELECT 1, 2*x, 3*x, 4 FROM generate_series(1,10) AS x; +CREATE UNIQUE INDEX tbl_include_c4_idx_unique ON tbl_include_c4 using lsm (c1, c2) INCLUDE (c3, c4); +ALTER TABLE tbl_include_c4 add PRIMARY KEY USING INDEX tbl_include_c4_idx_unique; +SELECT pg_get_indexdef(i.indexrelid) +FROM pg_index i JOIN pg_class c ON i.indexrelid = c.oid +WHERE i.indrelid = 'tbl_include_c4'::regclass ORDER BY c.relname; -- PK constraint. Must fail. CREATE TABLE tbl_include_c4_pk (c1 int, c2 int, c3 int, c4 int); From ea51592ff39d55f81f2ab295f91a3fc48f94da24 Mon Sep 17 00:00:00 2001 From: Steve Varnau Date: Thu, 12 Sep 2024 14:43:14 -0700 Subject: [PATCH 11/75] [DEVOPS-3234] yb_release: Change clean step error to warning Summary: In some environments, the "sbt clean" step resolves dependency and successfully downloads the scala-sbt module, but still returns error and causes the entire build to fail. This changes an error on the clean step to a warning. Should not affect official builds that always happen in a clean source tree. A real fatal error would be detected a bit later when checking for build results. 
Test Plan: Jenkins: compile only, build type: release Reviewers: jmak, kkannan Reviewed By: jmak Subscribers: devops, yugaware Differential Revision: https://phorge.dev.yugabyte.com/D38023 --- managed/yb_release.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/managed/yb_release.py b/managed/yb_release.py index cb5407f58be8..e5ede126dddb 100755 --- a/managed/yb_release.py +++ b/managed/yb_release.py @@ -42,7 +42,7 @@ common_sbt_options = ['-batch', '-no-colors'] _, err = Popen(["sbt", "clean"] + common_sbt_options, stderr=PIPE).communicate() if err: - raise RuntimeError(err) + log_message(logging.WARNING, "sbt clean error: " + str(err)) log_message(logging.INFO, "Kick off SBT universal packaging") From 72c91c49f4e1582ab4bae6d2f5c7e4203529980d Mon Sep 17 00:00:00 2001 From: Aishwarya Chakravarthy Date: Fri, 13 Sep 2024 15:08:26 -0400 Subject: [PATCH 12/75] [docs] Release notes for 2.23.0.0-b710 (#23687) * started 2.23 rn * made 2.23 the first entry * added new features and badges * broken links * new release notes * added highlights * edits from review * small edit * changed date * added a known issue in yba * minor edit * bumped build number and date * link change * date change * b710 * format --------- Co-authored-by: Dwight Hodge --- docs/config/_default/menus.toml | 4 +- docs/config/_default/params.toml | 10 +- docs/content/preview/quick-start/docker.md | 2 +- .../preview/releases/yba-releases/_index.md | 2 +- .../preview/releases/yba-releases/v2.19.md | 2 +- .../preview/releases/yba-releases/v2.20.md | 2 +- .../preview/releases/yba-releases/v2.21.md | 6 +- .../preview/releases/yba-releases/v2.23.md | 332 ++++++++++++++ .../preview/releases/ybdb-releases/_index.md | 3 +- .../preview/releases/ybdb-releases/v2.21.md | 17 +- .../preview/releases/ybdb-releases/v2.23.md | 425 ++++++++++++++++++ .../preview/releases/ybdb-releases/v2024.1.md | 8 +- .../migrate-replicated.md | 8 +- docs/data/currentVersions.json | 12 +- 14 files changed, 798 
insertions(+), 35 deletions(-) create mode 100644 docs/content/preview/releases/yba-releases/v2.23.md create mode 100644 docs/content/preview/releases/ybdb-releases/v2.23.md diff --git a/docs/config/_default/menus.toml b/docs/config/_default/menus.toml index 7e615c00bb27..5b97307f1c3d 100644 --- a/docs/config/_default/menus.toml +++ b/docs/config/_default/menus.toml @@ -359,7 +359,7 @@ name = "YugabyteDB" weight = 10 identifier = "yugabytedb" - url = "/stable/" + url = "/preview/" [home.params] showSection = true @@ -367,7 +367,7 @@ name = "YugabyteDB Anywhere" weight = 11 identifier = "yugabytedb-anywhere" - url = "/stable/yugabyte-platform/" + url = "/preview/yugabyte-platform/" [home.params] showSection = true diff --git a/docs/config/_default/params.toml b/docs/config/_default/params.toml index b547b67a6220..84d6043cad4c 100644 --- a/docs/config/_default/params.toml +++ b/docs/config/_default/params.toml @@ -51,12 +51,12 @@ version_menu_pagelinks = true sidebar_search_disable = true ul_show = 1 +[[versions]] + url = "/preview" + version = "v2.23 (Preview)" [[versions]] url = "/stable" version = "v2024.1 (STS)" -[[versions]] - url = "/preview" - version = "v2.21 (Preview)" [[versions]] url = "/v2.20" version = "v2.20 (LTS)" @@ -74,8 +74,8 @@ version_menu_pagelinks = true # Custom params, regardless of theme [yb] terms_of_service = "https://www.yugabyte.com/terms-of-service/" - preview_version = "v2.21" - preview_version_slug = "stable" + preview_version = "v2.23" + preview_version_slug = "preview" # To disable heading/title icons for particular page, define `hideHeadingIcon: true` on that page params. 
heading_icons = true diff --git a/docs/content/preview/quick-start/docker.md b/docs/content/preview/quick-start/docker.md index 74e1921c9577..826b0b043787 100644 --- a/docs/content/preview/quick-start/docker.md +++ b/docs/content/preview/quick-start/docker.md @@ -104,7 +104,7 @@ docker ps ```output CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -c1c98c29149b yugabytedb/yugabyte:{{< yb-version version="stable" format="build">}} "/sbin/tini -- bin/y…" 7 seconds ago Up 5 seconds 0.0.0.0:5433->5433/tcp, 6379/tcp, 7100/tcp, 0.0.0.0:7000->7000/tcp, 0.0.0.0:9000->9000/tcp, 7200/tcp, 9100/tcp, 10100/tcp, 11000/tcp, 0.0.0.0:9042->9042/tcp, 0.0.0.0:15433->15433/tcp, 12000/tcp yugabyte +c1c98c29149b yugabytedb/yugabyte:{{< yb-version version="preview" format="build">}} "/sbin/tini -- bin/y…" 7 seconds ago Up 5 seconds 0.0.0.0:5433->5433/tcp, 6379/tcp, 7100/tcp, 0.0.0.0:7000->7000/tcp, 0.0.0.0:9000->9000/tcp, 7200/tcp, 9100/tcp, 10100/tcp, 11000/tcp, 0.0.0.0:9042->9042/tcp, 0.0.0.0:15433->15433/tcp, 12000/tcp yugabyte ``` Run the following command to check the cluster status: diff --git a/docs/content/preview/releases/yba-releases/_index.md b/docs/content/preview/releases/yba-releases/_index.md index aafcb011e562..47309e6fe2c3 100644 --- a/docs/content/preview/releases/yba-releases/_index.md +++ b/docs/content/preview/releases/yba-releases/_index.md @@ -14,8 +14,8 @@ cascade: | Release series | Released | End of maintenance support | End of Life (EOL) | | :------------- | :------- | :------------------------- | :---------------- | +| [v2.23](v2.23/) | {{< yb-eol-dates "v2.23" release >}} | n/a | n/a | | [v2024.1](v2024.1/) | {{< yb-eol-dates "v2024.1" release >}} | {{< yb-eol-dates "v2024.1" EOM >}} | {{< yb-eol-dates "v2024.1" EOL >}} | -| [v2.21](v2.21/) | {{< yb-eol-dates "v2.21" release >}} | n/a | n/a | | [v2.20](v2.20/) | {{< yb-eol-dates "v2.20" release >}} | {{< yb-eol-dates "v2.20" EOM >}} | {{< yb-eol-dates "v2.20" EOL >}} | | [v2.18](v2.18/) | {{< 
yb-eol-dates "v2.18" release >}} | {{< yb-eol-dates "v2.18" EOM >}} | {{< yb-eol-dates "v2.18" EOL >}} | | [v2.14](../ybdb-releases/v2.14/) | {{< yb-eol-dates "v2.14" release >}} | {{< yb-eol-dates "v2.14" EOM >}} | {{< yb-eol-dates "v2.14" EOL >}} | diff --git a/docs/content/preview/releases/yba-releases/v2.19.md b/docs/content/preview/releases/yba-releases/v2.19.md index 347f8dd03a6f..02149aec82a9 100644 --- a/docs/content/preview/releases/yba-releases/v2.19.md +++ b/docs/content/preview/releases/yba-releases/v2.19.md @@ -14,7 +14,7 @@ rightNav: type: docs --- -What follows are the release notes for all releases in the **YugabyteDB Anywhere** (YBA) v2.19 series. Content will be added as new notable features and changes are available in the patch releases of the YBA v2.19 series. +What follows are the release notes for all releases in the YugabyteDB Anywhere (YBA) v2.19 series. Content will be added as new notable features and changes are available in the patch releases of the v2.19 series. For an RSS feed of all release series to track the latest product updates, point your feed reader to the [RSS feed for releases](../index.xml). diff --git a/docs/content/preview/releases/yba-releases/v2.20.md b/docs/content/preview/releases/yba-releases/v2.20.md index 1642ac3dd4eb..e1301df012d8 100644 --- a/docs/content/preview/releases/yba-releases/v2.20.md +++ b/docs/content/preview/releases/yba-releases/v2.20.md @@ -21,7 +21,7 @@ type: docs ## Release notes -What follows are the release notes for all releases in the **YugabyteDB Anywhere** (YBA) v2.20 series. Content will be added as new notable features and changes are available in the patch releases of the YBA v2.20 series. +What follows are the release notes for all releases in the YugabyteDB Anywhere (YBA) v2.20 series. Content will be added as new notable features and changes are available in the patch releases of the v2.20 series. 
For an RSS feed of all release series to track the latest product updates, point your feed reader to the [RSS feed for releases](../index.xml). diff --git a/docs/content/preview/releases/yba-releases/v2.21.md b/docs/content/preview/releases/yba-releases/v2.21.md index 4cdfc421f634..049cadfe4c69 100644 --- a/docs/content/preview/releases/yba-releases/v2.21.md +++ b/docs/content/preview/releases/yba-releases/v2.21.md @@ -1,7 +1,7 @@ --- title: What's new in the YugabyteDB Anywhere v2.21 release series headerTitle: What's new in the YugabyteDB Anywhere v2.21 release series -linkTitle: v2.21 series (Preview) +linkTitle: v2.21 series description: Enhancements, changes, and resolved issues in the YugaybteDB Anywhere v2.21 preview release series. aliases: - /preview/releases/yba-release-notes/preview-release/ @@ -9,11 +9,11 @@ menu: preview_releases: identifier: yba-v2.21 parent: yba-releases - weight: 1055 + weight: 1057 type: docs --- -What follows are the release notes for all releases in the **YugabyteDB Anywhere** (YBA) v2.21 series. Content will be added as new notable features and changes are available in the patch releases of the YBA v2.21 series. +What follows are the release notes for all releases in the YugabyteDB Anywhere (YBA) v2.21 series. Content will be added as new notable features and changes are available in the patch releases of the v2.21 series. For an RSS feed of all release series, point your feed reader to the [RSS feed for releases](../../index.xml). 
diff --git a/docs/content/preview/releases/yba-releases/v2.23.md b/docs/content/preview/releases/yba-releases/v2.23.md new file mode 100644 index 000000000000..85100eadd984 --- /dev/null +++ b/docs/content/preview/releases/yba-releases/v2.23.md @@ -0,0 +1,332 @@ +--- +title: What's new in the YugabyteDB Anywhere v2.23 release series +headerTitle: What's new in the YugabyteDB Anywhere v2.23 release series +linkTitle: v2.23 series (Preview) +description: Enhancements, changes, and resolved issues in the YugabyteDB Anywhere v2.23 preview release series. +menu: + preview_releases: + identifier: yba-v2.23 + parent: yba-releases + weight: 1055 +type: docs +--- + +What follows are the release notes for all releases in the YugabyteDB Anywhere (YBA) v2.23 series. Content will be added as new notable features and changes are available in the patch releases of the v2.23 series. + +For an RSS feed of all release series, point your feed reader to the [RSS feed for releases](../../index.xml). + +## v2.23.0.0 - September 13, 2024 {#v2.23.0.0} + +**Build:** `2.23.0.0-b710` + +### Download + + + +### Change log + +
+ View the detailed changelog + +### Improvements + +* Allows periodic copying of core dumps to a designated volume by a background thread, enhancing dump management strategy. PLAT-12633 +* Alters the default `yba-ctl createBackup` behavior to skip the restart process, enhancing user experience. PLAT-12912 +* Changes the clock skew alert threshold from 500ms to 250ms, enabling detection of clock skew issues before TServer starts crashing, giving users more reaction time. PLAT-13249 +* Re-enables api_token endpoint access from HA follower for better automation setups. PLAT-13267 +* Adds all missing migration settings and ensures any on-the-fly adjustments to systemd units are recognized. PLAT-13330,PLAT-13331 +* Removes the alert for client certificate expiry and ensures it won't be added to new deployments. PLAT-13413,PLAT-13316 +* Changes the default permission to `600` for the backup manifest file to accommodate immutable NAS devices. PLAT-13578 +* Ensures valid inputs for `smtpPort` and `Server and Port` fields in `Create new alert channel` dialog to prevent errors. PLAT-13702 +* Adds a toggle in the UI to suppress health check notifications during maintenance windows. PLAT-13856 +* Introduces a health check to alert when runtime certification for node-to-node communication is nearing expiry, necessitating a restart roll, and advises on certificate rotation if on-disk certifications are due to expire within 30 days. PLAT-13865 +* Offers certificate expiration alerts based on certificates served by master and TServer processes, not just on-disk ones. PLAT-13865 +* Revisions will now alert users on expiration of actual TLS certificates served by the master and TServer processes. PLAT-13865 +* Allows Ansible tasks to run seamlessly even with very long host names by using hash for Ansible/SSH control path. PLAT-13938 +* Adjusts the disk availability check for upgrades to use the state file instead of the outdated .installed marker file. 
PLAT-14188 +* Introduces an adjustable 1-minute delay to AutoFlags promotion before undertaking any other action. PLAT-13139 +* Fixes the failure of admin user DDL due to concurrent DDLs across all cloud providers. PLAT-13221 +* Modifies the password reset URL to a new URL on the platform. PLAT-13510 +* Enables LDAP login users in YBA to restrict access using `ldapSearchFilter` in the LDAP configuration. PLAT-13209 +* Allows setting up of YugabyteDB in AWS Singapore Government's GCC Plus environment by extending AZ name limit from 25 to 100 characters. PLAT-13212 +* Alerts now raise 30 days prior to certificate expiry for better visibility, reducing the risk of missed expiration. PLAT-13348 +* Introduces a new feature, `tablet guardrail mechanism`, that triggers a warning when the ratio of live tablet peers to the supportable tablet peers is more than 0.9, and a severe alert when it's more than 1.0. Available from 2024.1 onward. PLAT-13520 +* Displays uptime/downtime status of the service in yba-ctl outputs for enhanced visibility. PLAT-13532 +* Adds more air gap checks to Ansible installation steps to prevent failures when connecting to public repositories. PLAT-14331 +* Allows automatic backup and rollback during upgrades, ensuring continued service without any disruption. PLAT-14776 +* Displays clear optional tags and tooltips for Azure provider's `Network Resource Group` and `Network Subscription ID` fields for better understanding when to fill them. PLAT-12546 +* Ensures secure SSH key validation and updates error format for on-prem providers for better consistency. PLAT-13064 +* Adds `ikeep` to the XFS mount options to mitigate possible key duplication in the block cache. PLAT-13192 +* Turns off "assign public IP" option in Azure create universe by default. PLAT-13948 +* Replaces CentOS 7 repository URLs which are now invalid due to its EOL, ensuring continued CentOS 7 universe creation. 
PLAT-14546 +* Enables explicit removal of pexlock after usage to avoid interference when provisioning on-prem nodes manually. PLAT-14161 + +### Bug fixes + +* Allows node health checks to handle cases where the node name field is not mandatory for on-premises nodes. PLAT-11188 +* Allows automatic re-fetching of an expiring access token, ensuring uninterrupted user access if the `offline_access` scope is enabled. PLAT-11246 +* Allows YBC upgrades during initial backup and restore operations only. Fixes restore operations failure when the source universe UUID is null. PLAT-12663,PLAT-12644 +* Allows proper date display in UI by specifying an exact input format for diverse timezones. PLAT-12721 +* Reverts changes in platform UI to correct graph display issue linked to invalid date representation. PLAT-12721 +* Allows proper date display in UI by specifying an exact input format for diverse timezones. PLAT-12721 +* Ensures yba-ctl stop command stops YBA services gracefully, avoiding `ERRORED` status due to premature exit code. PLAT-12767 +* Adds lock timeouts for Ansible tasks to prevent failures when acquiring yum lockfile. PLAT-13029 +* Removes `lock_timeout` parameter from apt or package modules, resolving potential conflicts in Ansible 2.9. PLAT-13029 +* Bypasses clock sync check on a node if `chronyc` is not installed. PLAT-13137 +* Improves openssl command's output formatting for more reliable CN and SAN value retrieval during certification verification. PLAT-13169 +* Enables usage of underscores in GCS bucket names during GCP Backup configuration. PLAT-13266 +* Adds missing cloud regions in YBA metadata, keeping it in sync with available regions for EKS/GKS/AKS Kubernetes providers. PLAT-13374 +* Enables consistent generation of new incremental backup times in the event of clock skewness. PLAT-13375 +* Modifies node metrics file creation to explicitly set permissions, ensuring accessibility despite custom system umasks. 
PLAT-13378 +* Ensures universe unlock and restore progress if YBA UI crashes, reboots, or shuts down to avoid manual cleanup. PLAT-13409,PLAT-12830 +* Stops health check alerts during an active maintenance window by introducing a new parameter, `SuppressHealthCheckNotificationsConfig`, to the `MaintenanceWindow` model class and its APIs. PLAT-13518 +* Fixes LDAP validation to correctly identify the first instance of `ldap` using regex with whitespace characters. This eliminates previous false validations. PLAT-13575 +* Allows deletion of expired, aborted, or failed backups, removes redundant backups when a schedule is deleted, retries backup deletion before marking it as `Failed To Delete`, and queues ongoing deletions for later when YBA restarts. PLAT-13750 +* Lets users rotate node-to-node certificates alone without client-to-node encryption enabled. PLAT-13806 +* Unsnoozes all universe level health check notifications to encourage use of the maintenance window function. PLAT-13928 +* Corrects the checksum mismatch in the V342 migration to ensure successful upgrades from 2.14 to later branches. PLAT-13977 +* Corrects timezone discrepancies in backup timestamps in the HA "Make Active" dialog. PLAT-14031 +* Allows runtime configuration get API to return the correct inherited value, not just the parent scope value. PLAT-14090 +* Stops master process before clearing master data folders to prevent unexpected states. PLAT-14095 +* Conceals DB user's password to prevent exposure in the application log during the upgrade procedure. PLAT-14286 +* Ensures the node agent installer doesn't overwrite PATH values in non-manual provisioning. PLAT-14332 +* Adjusts yml task to retain the last old release during the release GC process. PLAT-14368 +* Corrects the calculation of affected nodes in the certificate alerts message. PLAT-14385 +* Allows backing up, avoiding repetitive full backup tasks and updating incremental backup time only after passing all validation checks. 
PLAT-14497 +* Allows WaitForPod to overlook runtime exceptions when fetching pod status, preventing rolling restart failures in k8's due to a `Pod not found` error. PLAT-14498 +* Incorporates a DDL atomicity check into the health check script, ensuring timely detection of DDL corruption issues. PLAT-14696 +* Addresses issues with yb_platform_backup.sh for custom replicated storage paths by correctly assessing version checks, writing to the right directories, and ensuring smooth container restarts after configuration changes. PLAT-14705 +* Displays a warning message to verify the selected image on a standalone VM before initiating the upgrade. PLAT-14749 +* Reduces security risks by storing hashed API tokens instead of actual tokens in the users table. PLAT-8028 +* Disables weak `C` grade ciphers for key exchange to prevent security threats. Adjusts cipher suite list for Prometheus, allowing modification during installation/upgrade to disable certain ciphers. Ensures only `A` grade ciphers with key size greater than 2048 bits are used, enhancing security against potential attacks. PLAT-9590 +* Migrates to stronger TLS cipher suites for HTTPS Prometheus to improve security by preventing weak key exchange vulnerabilities. PLAT-9590 +* Prevents universe chain upgrade failure from 2.0 to 2.18 and 2.20 by ensuring the clock-script doesn't run during yb-process start command if it's not present or executable. PLAT-13444 +* Prevents `Edit Universe` modal from wrongly displaying master placement as `Place Masters on the same nodes as T-Servers` for a dedicated universe, providing accurate universe creation details. PLAT-13445 +* Systemd upgrades now skip Dual-NIC configuration, enhancing availability and reducing setup time. PLAT-13495 +* Enables handling of release artifacts that come with sha1 or md5 checksums after release migrations. 
PLAT-13716 +* Resolves a problem with dual-nic script functionality on GCP and Centos that was preventing connections to the public endpoint of single region VPC clusters. PLAT-14209 +* Corrects a typographical error in the dual-nic configuration script enhancing external connections to the cluster. PLAT-14370 +* Deprecates the change_password API due to lack of current password confirmation and introduces a new reset_password API that ensures increased account security by identifying the user through the Auth/API token only. PLAT-10472 +* Upgrades Azcopy to the latest version, addressing prior high and critical vulnerabilities for a safer use. PLAT-11235 +* Enables display of differences in instance tags in the confirmation modal box during FULL MOVE and UPDATE operations. PLAT-12085 +* Replaces deprecated API to prevent `Create Provider` timeout after 3 hours due to issues in Azure instance types query. PLAT-12558 +* Allows retrying of SystemdUpgrade tasks after a failure or aborted attempt. PLAT-13089 +* Resolves issue with incorrect delay used in `wait for server task` in Kubernetes, now using delay from configuration. PLAT-13182 +* Enables better handling of flag upgrade failure in Dual NIC case, rectifying issues with communication using secondary IPs. PLAT-13223 +* Disables checks for ybm temporarily due to issues with dual NIC. PLAT-13223 +* Allows for consistent loading of the placement modal in the create universe form regardless of the selected provider. PLAT-13294 +* Now, newly added nodes correctly assign master addresses, enhancing dual NIC usage for YBM. PLAT-13463 +* Marks pending tasks as failed on YBA restart to prevent indefinite pending state. PLAT-13516 +* Tunes GC generation sizes and fixes WSClient memory leak, also disables process metrics collector to prevent growing memory allocation over time. PLAT-13619 +* Eliminates file descriptor leaks enhancing database stability. 
PLAT-13665 +* Allows using a Java client for running node actions when node-agent is present, enhancing error reporting, and improving retry mechanisms. PLAT-13673 +* Adjusts tab display in the UI to prevent hiding due to addition of xCluster Disaster Recovery and CDC Replication Slots tabs. PLAT-13678 +* Eliminates an unnecessary dependency between timer and service files in the metrics collection under systemd. PLAT-13706 +* Restores initialization of the local instance's last backup time during HA sync for accurate updates. PLAT-13708 +* Fixes inconsistency in auto-generated YBA bundles, enabling correct default configuration when YBA version is bumped for AMI. PLAT-13796 +* Removes the misuse of defaultImageBundle in universe when custom AMI is specified using YBA's machineImage. PLAT-13800 +* Allows for updated machineImage passing from nodeDetails in disk operations, preventing edit universe tasks failure due to missing AMIs in AWS clusters. PLAT-13808 +* Upgrades PostgreSQL version to the latest 42.3.x addressing critical vulnerabilities. PLAT-13824 +* Redirects stderr logs in yb_backup.py to prevent kubectl warn logs from disrupting remote command executions. PLAT-14012 +* Allows error-free query for releases with artifacts of a specific deployment type by excluding artifacts without a linked release. PLAT-14057 +* Corrects ShellResponse in node-agent java-client to return a generic error code rather than 0 on connection errors. PLAT-14131 +* Prevents counting upgraded master nodes as "inactive" during a software upgrade, avoiding leadership issues. PLAT-14153 +* Ensures Edit Kubernetes Universe tasks only re-run after validating previous task parameters. PLAT-14203 +* Fixes issue where node-agent upgrade via java-client gets stuck due to incorrect permission settings. PLAT-14289 +* Allows the collect_metrics.timer and bind_check.service to operate independently, avoiding system hang-ups due to cyclic dependency issues. 
PLAT-14293 +* Removes circular dependency between ansible roles and limits node_exporter usage for non-YBM cases. PLAT-14297 +* Upgrades PostgreSQL from version 42.5.1 to 42.5.5 to mitigate security vulnerabilities. PLAT-14326 +* Resolves a compilation error by properly importing the `Stream` symbol. PLAT-14428 +* Updates Pekko version to fix the TLSActor infinite loop issue resulting in high CPU usage. PLAT-14524 +* Reverts added @JsonProperty annotations and up-versions Pekko to fix TLSActor infinite loop. PLAT-14524 +* Allows successful editing of Azure Provider by removing mandatory requirement of Network Resource Group and Network Subscription ID. PLAT-14530 +* Fixed the missing XmlElement dependency problem which was causing errors during Datadog validation. PLAT-14536 +* Corrects log file names for YB-Controller logs and ensures the creation of the `yb-controller-server.{INFO|WARN|ERROR}` symlink. PLAT-14594 +* Expand the YBC support bundle component to match log files with or without the process name prefix. PLAT-14609 +* Allows preserving the uploaded YBDB builds by relocating the directory, solving the issue of directory deletion after container restarts. PLAT-14655 +* Upgrades PostgreSQL version from 14.9 to 14.12 and backports to 18.x, 20.x, 21.x, and 2024.x. PLAT-14670 +* Ensures clearer error messages for node-agent installation failure due to existing local certificate files deletion. PLAT-14700 +* Fixes an issue where creating a universe fails due to a locale error, observed when transitioning between b659 and b664. PLAT-14814 +* Reverts change in JSON field name to avoid failing all preflight checks with node-agent. PLAT-14860 +* Enables thread safety for `Yrpc handleCallback` to prevent yb-client from consuming deferred results multiple times. PLAT-10056 +* Exposes YBA startup time as a measurable metric for AppInit elapsedTime. PLAT-10807 +* Upgrades go etcd and cyphar dependencies in yba-installer, enhancing security by fixing vulnerabilities. 
PLAT-12335 +* Upgrades mina-core package to a secure version 2.2.3 and paramiko to a non-vulnerable version 3.4.0. PLAT-12336 +* Changes the HTTP status code for conflicting edit tasks from 503 to CONFLICT to ensure accuracy. PLAT-12557 +* Enables direct file copying when creating a tarball for seamless third-party packages incorporation. PLAT-12564 +* Prevents the creation of unnecessary Master folders for read replica nodes to avoid version mismatches during software upgrades. PLAT-12806 +* Eliminates duplicate `exported_instance` label from Prometheus targets of DB exported metrics. PLAT-12808 +* Allows raising universe level alerts when the YBA Node Agent is down for over a minute. PLAT-12835 +* Introduces a background task for detecting failed master nodes in live universes, controllable by the `yb.automated_master_failover.enabled` runtime configuration. PLAT-12856 +* Enables automatic scheduling of master failover in the event of a failure. PLAT-12857 +* Allows using AZ UUID, instead of AZ name, to ensure uniqueness across regions when starting a new master. Also, prevents deletion of nodes in reserved state. PLAT-12886 +* Shifts the `useIMDSv2` field from AWS cloudinfo to the ImageBundle details for better provider creation payload configuration. PLAT-12967 +* Corrects the NTP Clock Sync health check failure on Amazon Linux 2 CIS hardened image. PLAT-13000 +* Solves the occasional issue of no return data during AWS VM creation due to non-updated AWS metadata by implementing retries. PLAT-13049 +* Enables successful backup creation of YCQL tables from the tables page by resolving the `Failed to parse BackupRequestParams` error. PLAT-13056 +* Enables preflight check validation for image bundles during provider creation/editing on AWS, preventing creation with missing AMI in corresponding region. PLAT-13111 +* Resolves rare memory pressure issue causing bad_alloc exception in RunOp queue and clears tablets_to_ip map before retry to prevent unwarranted uploads. 
Increases YBC client and server version. PLAT-13157 +* Allows editing the number of read replicas in K8S from both the backend and UI without error. PLAT-13163 +* Allows Kubernetes to refresh certificates using YBA Universe metadata's `rootCA` rather than taskParams, enhancing data backup integrity during a task failure. Resolves a `under replicated` error in node-to-node root certificate rotation, maintaining stability. PLAT-13172 +* Adjusts storage of SHA256 value for release artifacts to avoid exceptions. PLAT-13193 +* Increases the YBC client and server version to 2.1.0.0-b8, solves the issue with deletion of backups on certain NFS mount points. PLAT-13197 +* Adds a database version check to prevent software upgrade failures on xCluster universe. PLAT-13204 +* Allows recognition of custom alert names for replication lag by using the `template` field instead of the `name` field on the YBA UI, increasing alert configuration flexibility. PLAT-13211 +* Allows TaskExecutor to accommodate error codes by shifting retry info to task info. PLAT-13242 +* Refines AnsibleCreateServer cleanup routine to avoid forcing boot disk removal during retry on creation failure. PLAT-13270 +* Detects replicated app and avoids permission issues with Prometheus during yba-ctl installation. PLAT-13271 +* Allows Python to access seobject library via policycoreutils-python-utils on Alma9 for SSH port registration. PLAT-13276 +* Restores logging for Python subtasks marked with [app] that were previously not logged. PLAT-13313 +* Allows fetching the updated provider object from the database thereby ensuring correct SSH port configuration during bundle setup, preventing generation of bundles with wrong ports. PLAT-13325 +* Allows sort by version, date, and release state, and changes "copy file path" string to "copy file name". PLAT-13362,PLAT-13350 +* Changes `localProvider` key from `task` to `input` for its availability during sbt tasks. 
PLAT-13367 +* Allows tasks that install YugabyteDB to use the latest stable version instead of the universe intended version. PLAT-13373 +* Introduces a new runtime configuration `cluster_membership.timeout` to retry server tablets check for 1 min before node cleanup. PLAT-13381 +* Relocates the IMDSv2 toggle to the image bundle details on AWS provider creation page, improving customization per image bundle. PLAT-13333,PLAT-13429 +* Enables preview flag support in YBA with an added validation to ensure appropriate preview flag name setting. PLAT-13438 +* Adds a tooltip in the UI to explain the requirement of OIDC provider metadata. This offers guidance for configuring YBA in an air-gapped mode. PLAT-13446 +* Supports the inclusion of `unknownField` in the `userIntent` from subsequent configure calls. PLAT-13462 +* Enables replication role privileges for admin user to create replication slots using the `createRestrictedUser` function in YBA. PLAT-13486 +* Changes made to fall back to YugabyteDB's default AMI for YugabyteDB managed bundles if the AMI is not available in a particular region. For custom bundles, there's no fallback mechanism and it will fail early in situations where the respective AMI is missing. Dependency on region to ybImage removed. PLAT-13500 +* Allows the universe creation without facing a `NullPointerException` related to `placementInfo`. PLAT-13514 +* Adjusts Universe create and upgrade UI workflows to use the new ybdb_releases API, recommending suitable DB versions. PLAT-13519,PLAT-13435 +* Allows selection of preferred timezone setting for timestamps in metrics charts. PLAT-13538 +* Fixes UI crash tied to running DeleteNode after the last placement update task failure. PLAT-13546 +* Fixes the JS error causing unresponsiveness when clicking on the `In-Use Universes` tab in the releases page. PLAT-13549 +* Simplifies AWS/GCP/AZU provider settings by eliminating the `useTimeSync` toggle when `setUpChrony` is already enabled. 
Adds `enable_imdsv2_support` runtime flag to AWS provider form and allows toggling of IMDSv2 in YBA-managed image bundle. PLAT-13536,PLAT-13551 +* Introduces Active Session History (ASH) logic to the Tablet Server (TS) User Interface (UI) to enhance analysis and troubleshooting. PLAT-13557 +* Disables kamon status page module to tackle security concerns. PLAT-13563 +* Cleans up expired entries in node agent client more actively to enhance performance. PLAT-13581 +* Addresses inconsistency in reading static configuration keys ensuring uniform processing. PLAT-13582 +* Updates AWS metadata to include new regions. PLAT-13623 +* Expands support for modifying universe in the UI, now allowing a volume size increase and placement modification, eliminating the need for `nodesResizeAvailable`. PLAT-13630 +* Allows usage of the sshUser configured in the provider when `machineImage` parameter is used during universe creation, reducing failures in custom AMI cloud cases. PLAT-13632 +* Ensures the `Upgrade Available` link only appears when upgrades are genuinely present and makes the CDC link clickable. PLAT-13675,PLAT-13677 +* Allows manual backups on HA standby nodes without interfering with the node's later promotion to primary. PLAT-13683 +* Resolves Jenkins build errors by using an older version of a dependency causing issues. PLAT-13690 +* Allows rerun of GFlagsUpgrade task without the AreNodesSafeToTakeDown precheck, making task retries successful. PLAT-13703 +* Corrects fetching of plan info from Azure VM image tags before VM creation, preventing cluster creation failure. PLAT-13712 +* Includes total CPU usage graph in YBA metrics pages for a more comprehensive view of CPU utilization. PLAT-13714 +* Integrates v2 changes into the UI, generates boilerplate with JavaScript API, transfers to a single top-level folder structure for APIs, hooks, and helper functions, generates API stub code using the Orval framework, and modifies the Axios path to /api/v2.
Note: utilizes an older version of Orval due to the current build pipeline's limitations. PLAT-13742 +* Removes internal flags related to providers now enabled by default. PLAT-13743 +* Allows adding Aarch Linux Version even without any other aarch linux versions in AWS provider. PLAT-13744 +* Lets you store node metrics in the yb_home directory instead of the /tmp directory. PLAT-13755 +* Hides autoflags from the display when listing flags in the user interface. PLAT-13794 +* Upgrades spring-security-core to version 5.8.11, fixing a high-severity security vulnerability. PLAT-13828 +* Upgrades the commons-compress version from 1.25.0 to 1.26.0 addressing potential vulnerabilities. PLAT-13829 +* Upgrades to reactor-netty-http v1.0.39, 1.1.13 and netty-codec-http v4.1.108.Final to increase database security. PLAT-13834 +* Upgrades python cryptography to 42.0.4 and setuptools to 65.5.1, enhancing security. PLAT-13835,PLAT-13836 +* Upgrades the Python requests library to version 2.31.0 addressing the CVE-2023-32681 vulnerability. PLAT-13843 +* Upgrades golang crypto to 0.17 to counteract high and medium vulnerabilities. PLAT-13844 +* Ensures CPU architecture selection is enabled for all providers, not just AWS, improving DB Versions API usage. PLAT-13852 +* Alters snooze alert behavior to also suppress universe health check alerts during maintenance windows. PLAT-13857 +* Fixes an issue that caused data from newer backups to be wrongly restored during the restoration of older backups. PLAT-13905 +* Allows normal workflows like systemd upgrade to function even when AMI is deleted from the cloud console. PLAT-13971 +* Resolves the issue of universe creation failure due to incorrect Image Bundle UUID by using the specified ec2-user. PLAT-14004 +* Makes sure NodeAgent-based shell process execution respects the `logCmdOutput` parameter, preventing health check run disruptions.
PLAT-14048 +* Ensures `yb.runtime_conf_ui.tag_filter` accurately reflects BETA and INTERNAL flags in the UI, even without tab switching. PLAT-14098 +* Changes the tag of runtime configuration `oidc_feature_enhancements` from BETA to PUBLIC. PLAT-14140 +* Nullifies possibility of Null Pointer Exception when using old storage configuration based proxy without username. PLAT-14143 +* Allows default use of M-series instance types on AWS. PLAT-14196 +* Ensures keyspace is not left empty during restore API requests, preventing restoration issues. PLAT-14221 +* Adds ConnectOnly role to LDAP group table constraint for better access control. PLAT-14230 +* Enables setting of sshUser/Port from the overrides for backward compatibility, fixing Provider Edit to successfully update image Bundle. PLAT-14244 +* Ensures Centos7 deployments don't fail when using cron by shifting systemd configuration to the provision phase. PLAT-14275 +* Allows ignoring specific subtask failures with markers for further processing, paving the way for enhancements like auto master failover. PLAT-14316 +* Allows handling of large output in remote commands to prevent hanging. PLAT-14342 +* Splits locale installation for Ubuntu 20 on GCP into three tasks to prevent shell expansion issues. PLAT-14420 +* Fixes the `current lag` stat in xCluster to be table & stream specific, not influenced by other universes. PLAT-14425 +* Enables successful running of re-provision tasks without marking the universe as error in case of pre-check failures. PLAT-14440 +* Allows customization of the YBA Installer timeout duration to avoid process failure due to long startups. PLAT-14443 +* Shifts AWS queries to asynchronous task, preventing potential YugabyteDB Anywhere startup delays due to multiple AWS providers. PLAT-14444 +* Allows successful dropping of tables from non-db scoped replication without the unwanted nuisance of ILLEGAL_STATE errors. 
PLAT-14467 +* Enables setting of Prometheus auth without activating HTTPS for better authorization management. PLAT-14478 +* Enhances YBA Installer migrations to skip certain versions during backporting which can be applied later on upgrades. PLAT-14511 +* Fixes issue where clicking preview clears data and doesn't display correct information when setting up ysql_ident or ysql_hba multiline flags. PLAT-14515 +* Allows processing of all local releases during an import without failing due to local file issues. PLAT-14532 +* Allows fetching of static flags metadata for version 2.16+ from DB package when editing flags via UI. PLAT-14533 +* Stops deletion of key pair from cloud if `skipKeyPairValidate` is turned on, rectifying `edit Provider` test failures. PLAT-14624 +* Allows recalculating disk IOPS when volume size changes in Azure UltraSSD_LRS storage type. PLAT-14654 +* Upgrades Prometheus to the latest version, v2.53.1, in the chart. PLAT-14671 +* Upgrades YBC client and server version to 2.2.0.1-b3, resolving glibc version incompatibility issues on Alma 8 based YBC builds for CentOS 7 universes. PLAT-14722 +* Fixes node state reordering issue occurring post Linux version upgrade in "VM image upgrade" state. PLAT-14731 +* Corrects position of universe status loader on dashboard page. PLAT-14737 +* Replaces deprecated methods in the node agent for a smoother operation. PLAT-14746 +* Ensures correct import for TasklistTable on Platform, fixing the missing Toast import error. PLAT-14757 +* Adds `Region name` option and ensures regions are searched in the provider, fixing the issue of missing region metadata when adding provisioned nodes via Node Agent. PLAT-14790 +* Eliminates display of empty tooltips when Master or TServer nodes are unreachable. PLAT-14792 +* Enhances node resize retries and prevents nodes stuck in the `Resizing` state due to failure. 
PLAT-14822 +* Allows superadmin users to create Disaster Recovery (DR) setups and ensures DR links open in a new tab. PLAT-14861,PLAT-14862 +* Introduces feature flag to enable or disable the DDL atomicity check for better control, with subsequent reruns on health checks following previous failed checks. PLAT-15011 +* Blocks creation of multiple TLS certificates with the same name for a single customer to prevent confusion. PLAT-7406 +* Allows re-attempting node creation in Azure following previous failures, enhancing YBA reliability. PLAT-11654 +* Allows navigation back to the overview page from the universe view by clicking the universe name. PLAT-12592 +* Displays error when a selected zone doesn't contain any available nodes during universe configuration. PLAT-12959 +* Allows adding architecture to existing release and inserting data test ID for automation. PLAT-13227 +* Ensures index tables aren't overlooked when computing bootstrap parameters, preventing failure when adding a table to a database already containing an index. Also removes options to add/remove index tables in transactional xCluster configs. PLAT-13308 +* Allows users to pass deprecated fields when editing a provider, ensuring compatibility with providers created using legacy APIs. Fixes issue where providers created on version 2.14 were not editable through UI. PLAT-13394 +* Allows users to alter the API path using an environment variable from the app hosting TS UI. PLAT-13539 +* Now generates necessary artifacts correctly when ReleaseMetadata contains both a local and a helm chart, and ensures inclusion of local helm charts on YBA restart. PLAT-13558,PLAT-13561 +* Skips checks for `ListLiveTabletServers` API for YugabyteDB versions earlier than 2.8 to prevent chain upgrade failures. PLAT-13657 +* Ensures generation of YBA Managed bundles even if `yugaware_property` contains legacy ones, paving the way for successful version comparison and patching. 
PLAT-13681 +* Resolves minor condition check issue that prevented YBA Managed Bundles generation during first OS Patching enablement. PLAT-13681 +* Allows YBA to function using the latest TS Framework version. PLAT-13687 +* Allows filtering by event operations in OUTLIER mode and upgrades YBA to the latest TS Framework version. PLAT-13687 +* Ensures automatic setting of default image when creating a universe, enhancing user experience. PLAT-13722 +* Displays correct SSH port in the connection modal on user interface. PLAT-13754 +* Enables proper import/export of universe with newer releases, preventing attach/detach universe failures. PLAT-13761 +* Removes SystemdUpgrade from IN_TRANSIT list to address failures on -gcp-rf3 on master build. PLAT-13770 +* Removes the runtime configuration for the CA trust store as it's enabled by default from version 2.18. PLAT-13798 +* Refines empty list component styling and deactivates the action button on Linux version catalog when no versions are present for consistency. PLAT-13776,PLAT-13807 +* Enables users to create Database Replication with db scoped replication on YBA using the `db_scoped.enabled` runtime flag (though currently only supports non-TLS and non-bootstrapping uses). A new table `xcluster_namespace_config` added for tracking dbs/namespaces per xCluster configuration. PLAT-1386 +* Resolves UI issues on release list page, release details panel, add Release Modal, and Edit Release. PLAT-13918 +* Allows increasing TServer volume size in edit universe mode for K8 and enables resize of master volumes. PLAT-13920 +* Allows clicking on `Node Perform` check even when the node is in a decommissioned state. PLAT-14001 +* Enhances performance dashboards by adding task UUID to `ybp_universe_active_task_code` metrics and introducing a hidden API to retrieve specific task information. PLAT-14017 +* Updates task_uuid as a key label for proper in-memory updates, enhancing task analysis capabilities. 
PLAT-14017 +* Introduces `follower_lag_ms` metric to the dashboard for easier identification of lagging masters and struggling TServers. PLAT-14254 +* Displays last anomaly detection run time on primary dashboard and maintains consistent color coding for outlier nodes on secondary dashboard. PLAT-14305 +* Rearranges all UI-driven flags to INTERNAL and eliminates unused YBM runtime conf tag to simplify flag usage. PLAT-14156,PLAT-14323 +* Shifts all node agent based flags from BETA to INTERNAL in Provider Conf keys file for better flag classification. PLAT-14324 +* Adds validation to disallow the non-restart upgrade option during rollback. PLAT-14390 +* Migrates YSQL/YCQL configuration RBAC checks to universe actions level and integrates RBAC for PG Compatibility. PLAT-14668 +* Now displays a toast message when customer profile information gets updated. PLAT-14740 +* Displays the Premium V2 Storage type as an option for Azure during Create/Edit scenarios based on runtime configs. PLAT-14750 +* Reconfigures RBAC actions on Releases API and makes the previously hidden upload API external. PLAT-14756 +* Adds a runtime configuration to enable or disable the `Download Metrics as PDF` button in Metrics Page. PLAT-14781 +* Enhances yb.allow_db_version_more_than_yba_version for better YBA/DB version checks. PLAT-14800 +* Removes image OS check in AMI name for bootstrap scripts, enhancing compatibility with custom images. PLAT-3838 +* Stops logging the entire contents of the CA certificate in plaintext during deletion requests. PLAT-11650 +* Facilitates handling multiple comma-separated hostnames in YBA installer, enhancing template files, status commands, and reconfiguration wait times. PLAT-13096 +* Shows only unique cloud provider codes on the Universe region map, eliminating any duplicates. PLAT-13138 +* Updates URL endpoint for troubleshooting service to support server requests over HTTPS. 
PLAT-13154 +* Corrects the retrieval of autoflags from the target universe, ensuring TServer autoflags, not master autoflags, are compared during backup procedures. PLAT-13161 +* Marks `useIMDSv2` as deprecated at the provider level and moves it back to AWS cloud info. PLAT-13482 +* Updates YBC client and server versions to 2.1.0.0-b9, removing an error condition for multiple master leader addresses and enhancing Java client's resilience to short network outages. PLAT-13529 +* Now correctly reads inherited provider level runtime configuration values on Universe Form, enhancing geo-partitioning functionality. Fixes a bug with incorrect readings if values were not set at the provider level. PLAT-13606 +* Changes duplicate metric headings and updates flag keyword regex for LDAP configuration in Universe edit flags. PLAT-13697,PLAT-13395 +* Uptime now available for master nodes in DEDICATED mode, benefiting any cloud provider based universes and K8 universes. PLAT-13746,PLAT-13679,PLAT-12372 +* Allows safe extraction of DB files in a multi-thread environment by synchronizing conflicting buffer reads. PLAT-14160 +* Resolves an issue in yb_backup.py where the `stderr` keyword argument was incorrectly passed. PLAT-14208 +* Removes unnecessary `fromString` from storageType, previously used by the k8s operator. PLAT-14369 +* Corrects spelling errors in the success notification for dropping a table from replication. PLAT-14510 +* Safeguards API tokens by no longer storing them in plaintext, returning a refreshed API token with each getSessionInfo request. PLAT-14672 +* Allows multiple developers to use the same Kubernetes cluster by adding namespace name to the Helm release name and enabling the ability to override the release name with an environment variable. PLAT-14709 +* The GET /session_info API no longer sends the apiToken in the response, preventing the inadvertent breakage of client operations. 
PLAT-14710 +* Updates to `/session_info` API documentation to clarify that `getSessionInfo` will no longer generate `apiToken` with each invocation. PLAT-14710 +* Allows using internal load balancer as default while deploying devspace clusters without affecting existing port-forwarding workflow. PLAT-14798 +* Corrects the runtime configuration GET key endpoint that broke for object keys. PLAT-14829 +* Updates the incorrect YAML path in yba-installer to correctly get the restart seconds value. PLAT-14848 + +### Known issue + +* Unable to use the available node after it has been removed from the universe. PLAT-14590 + +
diff --git a/docs/content/preview/releases/ybdb-releases/_index.md b/docs/content/preview/releases/ybdb-releases/_index.md index c88e094e5e1d..c151a217962d 100644 --- a/docs/content/preview/releases/ybdb-releases/_index.md +++ b/docs/content/preview/releases/ybdb-releases/_index.md @@ -16,8 +16,8 @@ cascade: | Release series | Released | End of maintenance support | End of Life (EOL) | | :------------- | :------- | :------------------------- | :---------------- | +| [v2.23](v2.23/) | {{< yb-eol-dates "v2.23" release >}} | No support | n/a | | [v2024.1](v2024.1/) | {{< yb-eol-dates "v2024.1" release >}} | {{< yb-eol-dates "v2024.1" EOM >}} | {{< yb-eol-dates "v2024.1" EOL >}} | -| [v2.21](v2.21/) | {{< yb-eol-dates "v2.21" release >}} | No support | n/a | | [v2.20](v2.20/) | {{< yb-eol-dates "v2.20" release >}} | {{< yb-eol-dates "v2.20" EOM >}} | {{< yb-eol-dates "v2.20" EOL >}} | | [v2.18](v2.18/) | {{< yb-eol-dates "v2.18" release >}} | {{< yb-eol-dates "v2.18" EOM >}} | {{< yb-eol-dates "v2.18" EOL >}} | | [v2.14](v2.14/) | {{< yb-eol-dates "v2.14" release >}} | {{< yb-eol-dates "v2.14" EOM >}} | {{< yb-eol-dates "v2.14" EOL >}} | @@ -40,6 +40,7 @@ The following stable and preview releases are no longer supported: | Release series | Released | End of maintenance support | End of Life (EOL) | | :------------- | :------- | :------------------------- | :---------------- | +| [v2.21](v2.21/) | March 26, 2024 | n/a | n/a | | [v2.19](v2.19/) | March 8, 2024 | n/a | n/a | | [v2.17](v2.17/) | December 8, 2022 | n/a | n/a | | [v2.16](end-of-life/v2.16/) | December 14, 2022 | December 14, 2023 | June 14, 2024 | diff --git a/docs/content/preview/releases/ybdb-releases/v2.21.md b/docs/content/preview/releases/ybdb-releases/v2.21.md index a46ed4530786..bd3e40196562 100644 --- a/docs/content/preview/releases/ybdb-releases/v2.21.md +++ b/docs/content/preview/releases/ybdb-releases/v2.21.md @@ -1,9 +1,8 @@ --- -title: What's new in the v2.21 preview release series -headerTitle: 
What's new in the v2.21 preview release series -linkTitle: v2.21 series (Preview) -description: Enhancements, changes, and resolved issues in the v2.21 preview release series. -image: /images/section_icons/quick_start/install.png +title: What's new in the v2.21 release series +headerTitle: What's new in the v2.21 release series +linkTitle: v2.21 series +description: Enhancements, changes, and resolved issues in the v2.21 release series. aliases: - /preview/releases/release-notes/latest-release/ - /preview/releases/whats-new/latest-release @@ -11,10 +10,16 @@ menu: preview_releases: identifier: v2.21 parent: ybdb-releases - weight: 2810 + weight: 2811 type: docs --- +## Release announcements + +* [YugabyteDB 2.21 Introduces Industry-First Native Orchestration. It's Disaster Recovery Made Simple.](https://www.yugabyte.com/blog/simplified-disaster-recovery-release-221/) + +## Release notes + What follows are the release notes for the YugabyteDB v2.21 release series. Content will be added as new notable features and changes are available in the patch releases of the YugabyteDB v2.21 release series. For an RSS feed of all release series, point your feed reader to the [RSS feed for releases](../../index.xml). diff --git a/docs/content/preview/releases/ybdb-releases/v2.23.md b/docs/content/preview/releases/ybdb-releases/v2.23.md new file mode 100644 index 000000000000..ddaf8f97aa14 --- /dev/null +++ b/docs/content/preview/releases/ybdb-releases/v2.23.md @@ -0,0 +1,425 @@ +--- +title: What's new in the v2.23 preview release series +headerTitle: What's new in the v2.23 preview release series +linkTitle: v2.23 series (Preview) +description: Enhancements, changes, and resolved issues in the v2.23 preview release series. +menu: + preview_releases: + identifier: v2.23 + parent: ybdb-releases + weight: 2799 +type: docs +--- + +What follows are the release notes for the YugabyteDB v2.23 release series. 
Content will be added as new notable features and changes are available in the patch releases of the YugabyteDB v2.23 release series. + +For an RSS feed of all release series, point your feed reader to the [RSS feed for releases](../../index.xml). + +## v2.23.0.0 - September 13, 2024 {#v2.23.0.0} + +**Build:** `2.23.0.0-b710` + +### Downloads + + + +**Docker:** + +```sh +docker pull yugabytedb/yugabyte:2.23.0.0-b710 +``` + +### Highlights + +We're excited to announce the [technical preview](/preview/releases/versioning/#tech-preview-tp) of four powerful new features in YugabyteDB aimed at simplifying operations, enhancing functionality, and improving performance. + +**Instant database cloning** {{}} + +[Instant database cloning](/preview/manage/backup-restore/instant-db-cloning/) allows you to quickly create independent copies of your database for data recovery, development, and testing. Clones can be created in seconds, and initially consume no additional disk space because they share the same data files as the original database. Clones can be created as of now, or as of any time in the recent past (within a configurable history retention period), so developers can experiment without impacting production performance. Clones also provide a safety net for rapid recovery from accidental data loss or corruption. + +**pg_cron extension** {{}} + +We're introducing support for the [pg_cron extension](/preview/explore/ysql-language-features/pg-extensions/extension-pgcron/), which brings a cron-based job scheduler directly into the database. With pg_cron, you can schedule YSQL commands using familiar cron syntax, including jobs on intervals as fine as seconds. While pg_cron operates on a single node known as the pg_cron leader, the queries it schedules leverage the full resources of the distributed cluster. In case of node failure, leadership is automatically transferred, ensuring continuous availability and smooth scheduling operations. 
+ +**Semi-automatic xCluster replication** {{}} + +[Semi-automatic xCluster replication](/preview/deploy/multi-dc/async-replication/async-replication-transactional/) simplifies the management of YSQL transactional xCluster replication by operating at the database level instead of the table level. This reduces the need for IT admin involvement when tables are created or dropped, as xCluster management is only required for adding or removing entire databases from replication. DDL operations can be performed by authorized users or DBAs without needing elevated privileges. Semi-automatic xCluster supports YSQL transactional replication and is ideal for Disaster Recovery (DR) purposes. + +**Improvement to backward scans** {{}} + +We've improved the performance of backward scans by 10x. For example, descending scans on tables that are stored in ascending order are now much faster. Previously, backward scans were significantly slower than forward scans in YugabyteDB, causing the query optimizer to implement various workarounds, including adding a sort operator on top of the forward scan operator to keep the backward scan pattern comparable. Such optimizer tricks might have been ineffective in complex query plans. The improvements to backward scan performance now allows such queries to be faster out of the box. You enable the feature using the `use_fast_backward_scan` preview flag. + + + +### Change log + +
+ View the detailed changelog + +### Improvements + +#### YSQL + +* Enhances logging for DDL transaction conflicts and PG catalog version mismatches by including the DDL command tag and specific log details outside of the `log_ysql_catalog_versions` flag. {{}} +* Reduces per-backend memory consumption by reinstating TOAST compression for catalogue tables. {{}} +* Enables DDL atomicity feature by default by altering `ysql_yb_ddl_rollback_enabled`, `report_ysql_ddl_txn_status_to_master`, and `ysql_ddl_transaction_wait_for_ddl_verification` flags' defaults. {{}} +* Adds a new YSQL view for YCQL statement metrics, allowing it to be joined with YCQL wait events in the `yb_active_universe_history` table. {{}} +* Displays distinct prefix keys explicitly in the explain output, enhancing the clarity of indexing for users. {{}} +* Adds auto flag `ysql_yb_enable_ddl_atomicity_infra` to control DDL atomicity feature during the upgrade phase. {{}} +* Updates read time for each operation to simplify code and avoid applying used read time from obsolete operations. {{}} +* Allows YbInitPinnedCacheIfNeeded to only load the shared pinned cache, enhancing concurrent handling of DDLs in various databases. {{}} +* Rectifies a compilation error by eliminating duplicate declaration and unnecessary function triggered by merge issues. {{}} +* Avoids schema version mismatch errors during ALTER TABLE operations in cases where DDL atomicity is enabled. {{}} +* Adds new columns to localhost:13000/statements for more comprehensive database management, including user and database IDs along with varied block level statistics. {{}} +* Now logs global-impact DDL statements that increment all database catalog versions. {{}} +* Reorganizes extensions into three segregated directories for better access and ease of use. {{}} +* Resolves schema version mismatch errors that occur after an ALTER TABLE operation due to DDL transaction verification in non-debug builds. 
{{}} +* Introduces a new YSQL configuration parameter `yb_parallel_range_size` for better tuning of parallel range size. {{}} +* Removes the unused `keep_order` field from `YbctidGenerator` for cleaner results processing. {{}} +* Introduces a new YSQL configuration parameter `yb_enable_parallel_append` to disable the unannounced feature `parallel append`. {{}} +* Performs stylistic modifications and refactors in various YSQL scripts for better readability and performance. {{}} +* Adds support for creating vector indexes using a dummy ANN method `ybdummyann`, enabling preliminary vector-based searching in databases. {{}} +* Restricts the undesired usage of LWFunction by disallowing move copy. {{}} +* Simplifies the column binding logic for ybgin and lsm access methods and prepares for addition of user-defined index types. {{}} +* Enables the grammar for CREATE/DROP ACCESS METHOD for more flexible extension handling. {{}} +* Eradicates misleading log message during table creation with DDL atomicity enabled. {{}} +* Introduces a new enum PgYbrowidMode to deduplicate hidden internal column addition logic. {{}} +* Adds Save/Restore state functionality to `ConsistentReadPoint` using a new `Momento` class. {{}} +* Avoids renaming DocDb tables during legacy rewrite operations to prevent issues with backup/restore and improves handling of failed ADD/DROP primary key, ALTER TYPE operations. {{}} +* Stops python checks on all third-party extensions to avoid build failure. {{}} +* Simplifies and cleans up code in PgDml/PgSelect/PgSelectIndex classes, ensuring only necessary fields are used and removing redundant destructors, with no logic changes. {{}} +* Improves backward scans by updating the cost-based optimizer to consider backward scan enhancements, leading to significant execution-time improvements when `FLAGS_use_fast_backward_scan` is enabled. {{}} +* Introduces new role-related flags in `yb_backup.py` script to enhance backup and restore functionalities. 
{{}} +* Fixes various issues in the Batch Nested Loop Join code, particularly the new row array comparison, for clearer functioning and better documentation. {{}} + +#### YCQL + +* Now throws an error when using the unsupported GROUP BY clause in YCQL with autoflag `ycql_suppress_group_by_error` available for compatibility issues. {{}} + +#### DocDB + +* Adds an `ABORTED` state and an `abort_message` field to the `SysCloneStateInfoPB` object for better clone failure management. {{}} +* Offers a new stack trace tracking framework for improved visibility of disk I/O operations, configurable through the `track_stack_traces` flag. {{}} +* Resolves build failure caused by a problematic merge, offering better disk IO visibility by tracking IOs by stacktrace. {{}} +* Blocks writes based on the total number of bytes being flushed, not just when 2 memtables are flushing. {{}} +* Allows asynchronous DNS cache updating and resolution retry upon failure to reduce RPC call delays and prevent unexpected leadership changes. {{}},{{}} +* Enables reduction of duplicate code and custom flag filtering through `GetFlagInfos` relocation. {{}} +* Introduces a new flag to toggle on or off recommended memory defaults for increased control over individual memory settings. {{}} +* Introduces utility to dump top heap stacks when memory limit is exceeded for enhanced debugging. {{}} +* Deprecates unused flags and old svc_num_workers flags for clearer user configurations. {{}} +* Allows local debug builds of YugabyteDB to restart from release builds to enhance debugging. {{}} +* Shifts xCluster-related functions from CatalogManager to XClusterSourceManager for easier management. {{}} +* Speeds up backward scans by building rows from end to start, reducing unnecessary repositioning and Seek calls, enabled with `FLAGS_use_fast_backward_scan` flag. {{}} +* Boosts error messaging clarity when preview flags are not set in `allowed_preview_flags_csv`. 
{{}} +* Prevents GetChangesForXCluster from returning an invalid safe time in certain conditions. {{}} +* Broadens the rewrite_test_log.py script to incorporate more directory replacements such as home directory, YB_SRC_ROOT, LLVM toolchain directory, and third-party dependencies directory. {{}} +* Adjusts TServer memory percentage from 50% to 48% for (4,8]GiB boxes and sets new recommendations for boxes over 16 GiB. {{}} +* Incorporates the usearch and fp16 header-only libraries into the src directory, simplifying code import. {{}} +* Returns the original error messages from CreateTable and CreateTablegroup instead of an incorrect "Database not found" message. {{}} +* Enables replication of DDL schemas and users to maintain consistency between source and target. {{}} +* Updates `AreNodesSafeToTakeDown` to return earlier and deliver a readable error message when hitting a timeout, using a control flag, `are_nodes_safe_to_take_down_timeout_buffer_ms` with a default setting of 2 seconds. {{}} +* Reduces unnecessary alerts by removing "No active snapshot" warning from the logs. {{}} +* Ensures Data Definition Language (DDL) operations replicate exactly once by checking the `replicated_ddls` table prior to rerunning any DDL. {{}} +* Streamlines the creation of xCluster streams by unifying the scattered code into `XClusterClient::CreateXClusterStream`. {{}} +* Allows for faster failover in xCluster DR by skipping the cleanup process when `skip_producer_stream_deletion` is set on `DeleteUniverseReplicationRequestPB`. Safe for upgrades and rollbacks. {{}} +* Reduces extra reactor threads by reusing server messenger in AutoFlags. {{}} +* Reduces resource usage by enabling stateful service client to reuse the server's existing yb_client. {{}} +* Adds `SCHECK_PB_FIELDS_NOT_EMPTY` macro for validating non-empty fields, with individual checks on list elements. {{}} +* Splits up yb_xcluster_ddl_replication.c into additional util files for better project extensibility. 
{{}} +* Removes deprecated JSON output format in xCluster for clean and efficient function. {{}} +* Allows only single DDL query strings to prevent issues with DDL+DML mixes or multiple DDLs. {{}} +* Shifts certain RPC endpoint methods from the CatalogManager to the new MasterClusterHandler class for easier management. {{}} +* Adds `read-time` option description in the `help` of `ysql_dump` allowing database dump at a specified past time. {{}} +* Renames and replaces `cdc_consumer_handler_thread_pool_size` with `xcluster_consumer_thread_pool_size` to reduce CPU and memory usage. {{}} +* Refreshes stack trace tracking UI endpoints to enable per-column sorting and optimize sorting script. {{}} +* Allows table locking by acquiring local server object locks for DDLs and DMLs, hosted by a local transaction participant, with session ID and tied to their lifetime. Includes performance upgrades to lock acquisition and conflict resolution. {{}} +* Simplifies replication setup by using table IDs instead of names, helping avoid issues caused by table renames or recreations. {{}} +* Increases speed of backward scans for flat document reader with support for packed row V2. {{}} +* Reuses the Tservers `yb::client` in `CDCService` to decrease 4 threads and a meta cache, addressing a `CDCService`/xCluster source issue. {{}} +* Allows unified usage of XClusterRemoteClient in XClusterConsumer, centralizing client creation logic. {{}} +* Ensures replication health before succeeding `IsSetupUniverseReplicationDone`, improving error detection. {{}} +* Adds `external_hybrid_time` to log-dump output to detect xCluster target writes. {{}} +* Introduces two new `SOURCE_UNREACHABLE` and `SYSTEM_ERROR` enums to enable more detailed error reporting from the Poller. {{}} +* Allows requesting streams by producer table ids for xCluster DDL Replication to accurately match tables together. 
{{}} +* Enhances stack trace tracking endpoints usability by adding an access button, a reset tracking link, and timestamp details. {{}} +* Renames and shifts various members and functions from CatalogManager to XClusterManager. {{}} +* Replaces the deprecated and unused namespace replication with DB Scoped replication for a more efficient solution. {{}} +* Makes pggate aware of fast backward scan capability for accurate cost identification in the Cost Based Optimizer. {{}} +* Adds `emergency_repair_mode` flag and new yb-admin commands to handle corrupted CatalogEntity data without stopping `yb-master`. {{}} +* Relocates Setup, Bootstrap, Alter and Delete Target Replication functions for better organization. {{}} +* Changes column ID representation in debug builds to be compatible with release builds. {{}} +* Refactors the `PopulateTabletCheckPointInfo` function to improve its readability and maintainability. {{}} +* Removes display of InvalidFlags in the flags UI to clean up the user interface. {{}} +* Allows for storing and loading of vector indexes, ensuring effective management of these indexes. {{}} +* Integrates class `DocDBStatistics` with `ReadOperationData` for better statistics transmission to `IntentAwareIterator`. {{}} +* Introduces a new flag `max_disk_throughput_mbps` for automated control of write rejections when disk is full, replacing `reject_writes_min_disk_space_aggressive_check_mb`. {{}} +* Adds a flag `FLAGS_tablet_split_min_size_ratio` to control tablet splitting based on SST file sizes, ensuring better control over tablet size imbalance. {{}} +* Relocates heartbeat code for greater readability and Catalog Manager size reduction. {{}},{{}} +* Simplifies the clone state manager by moving persisted data to an in-memory structure. {{}} +* Relocates specific heartbeat code to `master_heartbeat_service.cc` for enhanced readability and easier tracking. 
{{}},{{}} +* Refactors heartbeat path code for easier navigation and reduction of CatalogManager size, with no functional changes. {{}},{{}} +* Shifts tcmalloc profiling code to the Utils folder. {{}} +* Moves tablet splitting specific RPCs and functions from `catalog_manager.cc` to `tablet_split_manager.cc` for better code management. {{}},{{}} +* Introduces a new flag `enable_rwc_lock_debugging` to control slow lock debugging and fixes a bug in `rwc_lock.cc`. {{}} +* Changes CloneStateInfo object from scoped_refptr to std::shared_ptr for standardization. {{}} +* Allows setting only non-empty schema names in YBTableName. {{}} +* Transfers ownership of TabletSplitManager, CloneStateManager, and SnapshotCoordinator from CatalogManager to Master for leaner dependency requirements. {{}},{{}} + +#### CDC + +* Preserves CDC stream even when all associated tables are dropped, tying its lifecycle to the database. {{}} +* Introduces three new yb-admin commands to remove a user table from a CDCSDK stream, disable dynamic table addition in a CDC stream, and validate CDC state for a particular stream, enhancing control over CDC streams. {{}},{{}} +* Prevents addition of tables with enum array column to the CDC stream to avoid crashes during consumption. {{}} +* Transforms the flag `yb_enable_cdc_consistent_snapshot_streams` from a preview into a default true auto flag. {{}} +* Enables dynamic table addition with Postgres replication consumption by setting retention barriers on new tables' tablets during creation. {{}} +* Allows modification of the publication refresh interval using the `cdcsdk_publication_list_refresh_interval_secs` flag. {{}} +* Adds a TServer flag, `ysql_yb_default_replica_identity`, for customizing default replica identity at table creation. {{}} +* Introduces `cdcsdk_enable_dynamic_table_addition` flag to manage dynamic table additions in replication slot consumption model. 
{{}} +* Introduces replication slot name for internal distinction between two consumption models in the code. {{}} +* Allows creating an old model stream via yb-admin in upgraded environments, ensuring only one stream type per database. {{}} +* Introduces replica identity in CDC to populate before image records, allowing table-level before image information fetching and retaining in stream metadata. {{}} +* Eliminates unnecessary NOTICE messages when setting yb_read_time from walsender, reducing message clutter. {{}} +* Enables transaction state to be cleared promptly after a table is deleted, preventing table deletion from getting stuck and resulting in faster functionality. {{}} + +#### yugabyted + +* Allows Connection Manager to handle error messages, preventing test failures in the YSQL layer. {{}} +* Ensures `RENAME DATABASE` query handles logical and physical connections correctly for consistent database behavior. {{}} +* Redefines tracking of role modifications using role OID in YSQL Connection Manager for accurate behavior during mid-session role renaming. {{}} +* Offers support for single-use YSQL configuration parameters in YSQL Connection Manager. {{}} +* Enables using role OID with `session_authorization` in YSQL Connection Manager for correct role alterations in a session. {{}} +* Allows sticky connections when setting certain YSQL configuration parameters not permitted in explicit transactions. {{}} +* Allows modification of YSQL configuration parameters on a running cluster by destroying the control connection. {{}} +* Adds a new `/pitr` endpoint and screen in the database page to list scheduled PITRs on yugabyted UI. {{}} +* Introduces `upgrade_finalize` command for smoother YugabyteDB version upgrades using yugabyted CLI and includes an `upgrade_ysql_timeout` flag. {{}} +* Directly enables `yb_enable_read_committed_isolation` and `ysql_enable_read_request_caching` on `yb-master` and `yb-tserver` processes. 
{{}} +* Delivers alerts on user interface when encountering node version mismatches in the cluster. {{}} +* Simplifies yugabyted by dropping Python2 support and transitioning the script to use Python3, replacing deprecated distutils package with shutil. {{}},{{}} +* Enables better handling of multi-valued flags in yugabyted without duplication, making the system more maintainable. {{}} +* Corrects the Sankey diagram for CPU usage by accurately calculating the total number of used/available cores. {{}} +* Made changes to string literals in `yugabyted` to avoid SyntaxWarning and added checks for exceptions during incorrect `advertise_address` input. {{}},{{}} +* Enables correct parsing of startup parameters with spaces in values when using the YSQL Connection Manager. {{}} +* Allows to specify multiple data directories using the new `additional_data_dir` configuration. {{}} +* Enables xCluster replication management between database clusters using new yugabyted commands. {{}} +* Ensures yugabyted UI metrics display properly with Kubernetes OSS operator deployed clusters. {{}} +* Elevates the reliability of the UUID retrieval process for tablet server nodes on the user interface. {{}} +* Ensures accurate CPU usage metrics by updating `prev_ticks_` at each metrics snapshotting iteration. {{}} +* Allows smooth node restart even if the `data_dir` parameter is missing in the user configuration file. {{}} +* Reduces `collect_logs` command failures by removing the yugabyted running check even when the yugabyted process is not running. {{}} +* Enhances `yugabyted configure_read_replica` commands with checks to gracefully handle failures when `data_placement_constraint` lacks `:`. {{}} + +### Bug fixes + +#### YSQL + +* Fixes an error that occurs when decoding null values from a boolean column sorted as NULLS LAST in a secondary index. {{}} +* Fixes YSQL upgrade single connection mode error preventing new connection attempts before the old ones are released. 
{{}} +* Allows YB Admins to run pg_locks without requiring superuser status. {{}} +* Avoids failure when upgrading from version 2.14/2.16 to 2.20 by introducing a check to ensure pggate can handle RPC metrics sidecar before sending Scanned Rows count. {{}} +* Fixes memory leaks in pg_constraint/pg_attrdef local cache by adding a missing `hash_destroy` call in `YbCleanupTupleCache`. {{}} +* Resolves remaining memory leaks in CacheMemoryContext to stabilize cache memory after every catalog cache refresh. {{}} +* Documents the limitations of retry logic when using `-c` flag in `ysqlsh` command. {{}} +* Allows YSQL DDL operations to wait for rollback/roll-forward operations to finish before proceeding. {{}} +* Allows more accurate modeling of base scan costs by taking into account the impact of storage index filters on secondary index. {{}} +* Resolves colocation option issues in table creation linked to table rewriting and partitioning. Enhances the `defGetBoolean` function to parse string values "0" and "1" as false and true respectively, and shifts a verification step to a earlier spot in the CREATE TABLE execution path, ensuring successful table partition creation. {{}},{{}} +* Renames the YSQL configuration parameter `ddl_rollback_enabled` to `yb_ddl_rollback_enabled` for specificity. {{}} +* Adds network latency cost to startup cost, yielding more accurate cost calculations in small tables. {{}} +* Disables bitmap scan by default to prevent unwarranted selection due to lower CBO costs. {{}} +* Reduces unnecessary log messages when `catalog_version_table_in_perdb_mode` is set to true. {{}} +* Corrects an issue where certain unbatchable filters weren't detected during indexpath formation when indexpath accepted batched values from multiple relations. Requires backports to 2.20 and 2.18. {{}} +* Exposes the YSQL configuration parameter `yb_enable_optimizer_statistics` as a flag `ysql_yb_enable_optimizer_statistics`. 
{{}} +* Corrects buffer overflow during placement validation in `ALTER TABLE SET TABLESPACE` operation. {{}} +* Allows for an enhanced readability and performance of yb_cost_index code, aiding in merging with the pg15 branch. {{}} +* The deadlock issue occurring when both a table and its index are deleted concurrently in yb-master has been resolved. {{}} +* Refines the YbGetOrdinaryColumnsNeedingPgRecheck condition to align with the ybIsTupMismatch implementation, ensuring Postgres rechecks index conditions when the "preliminary check" is skipped due to an invalid target key attnum. {{}} +* Corrects checks in YbIsScanCompatible to ensure the right-hand side (RHS) of all bound index conditions, not just inequalities, fits into the left-hand side (LHS) datatype. {{}} +* Prevents query layer retries for multi-statement queries to avoid redoing whole queries, ensuring idempotence. {{}} +* Fixes a bug that caused incorrect setting of global catalog version mode on TServer start. {{}} +* Fixes a bug in the index tuple width calculation for better YB base scans cost model. {{}} +* Ensures pushed down RowCompareExpressions correctly enforce non-null column references, rectifying previous behavior and enhancing data accuracy. {{}} +* Reduces the frequency of `schema version mismatch` errors during consecutive DDL operations by ensuring the up-to-date schema is fetched. {{}} +* Allows usage of `YsqlDdlRollbackEnabled` in pggate C++ code by correctly passing the result of `YbDdlRollbackEnabled`, reducing DDL atomicity g-flag issues in RF 1 clusters. {{}} +* Adds a new flag `ysql_min_new_version_ignored_count` to prevent a TServer crash caused by the downward shift in yb-master's catalog version, often surfacing post a PITR restore operation. {{}} +* The postgres process no longer crashes when running a "show all" command due to correct placement of the `yb_enable_ddl_atomicity_infra` description. 
{{}} +* Prevents failures in transaction restarts with UPDATE ...RETURNING queries in debug builds. {{}} +* Reverts updates from `Storage SQL` to `Remote SQL` and `Storage Filter` to `Remote Filter` for Foreign Scan. {{}} +* Adjusts inaccurate `ALTER TABLE` rewrite check for dropped rules to prevent unnecessary command failure. {{}} +* Fills in the "relation" column in `pg_locks` with the correct table OID after a table rewrite. {{}} +* Adjusts the value of YB_AT_REWRITE_ALTER_PRIMARY_KEY to prevent flag clashes and accommodate future upstream PG flags. {{}} +* Fixes the issue of PG crash when `yb_debug_log_catcache_events=1` is used before a database has been selected. {{}} +* Enables backward parallel scan capabilities, adjusting key bounds when conducting descending order scans. {{}} +* Prevents unnecessary CPU cycles and log flooding by not reading `pg_yb_catalog_version` when `enable_ysql=false`. {{}} +* Corrects the log message for successful column drop operations, ensuring accurate representation of DDL operations. {{}} +* Stops Batched Nest Loop (BNL) crashes by ensuring better indexing condition checks. {{}} +* Refines the logic to accurately push down join clauses to batched index scans without causing conflicts. {{}} +* Grants BNL hashtable its own expression context to prevent data overwrites during query execution. {{}} +* Re-enables rechecking for RowCompareExpressions to accurately handle NULL inputs in scan bound calculations. {{}} +* Resolves the `old-style-declaration` error in YbDdlRollbackEnabled by changing its definition to `static inline bool`. {{}} +* Prevents potential crashes by ensuring `yb_table_properties` pointer, in `load_relcache_init_file`, does not point to random, invalid memory. {{}} +* Makes `yb_get_range_split_clause` robust using PG TRY CATCH block, ensuring YB backup doesn't fail. {{}} +* Fixes memory leaks in ybcFetchNextHeapTuple by properly freeing the YBCStatus. 
{{}} +* Prevents core dumps by ensuring YSQL webserver destruction upon receiving a termination signal. {{}} +* Introduces new functions to enhance and consolidate the focus on tables stored in the system catalog. {{}} +* Allows `CreateNamespaceIfNotExists` function to retry on "already exists" error, preventing race conditions. {{}} +* Fixes the issue when a separately created and later attached partition does not properly inherit the parent's primary key using `ALTER TABLE ...ATTACH PARTITION`. {{}} +* Resolves potential database OID collision with `system_postgres` by excluding reserved OID 65535 in allocation. {{}} +* Allows skipping the relfilenode check on parent partition tables which do not get recreated during table rewrites. {{}} +* Now allows for correct backward prefix-based scanning by eliminating the problematic `kGroupEnd` marker that was leading to inaccurate seek results. {{}} +* Removes the unused function `CatalogManager::WaitForDdlVerificationToFinish` for clarity. {{}} +* Ensures bitmap scans correctly recheck all results and avoid excluding rows, improving accuracy of outcomes. {{}} +* Prevents the `IN` expressions on single column from wrongly taking the tuple path, ensuring correct data processing. {{}} +* Fixes incorrect access to the scan plan's bind descriptor during tuple IN condition rechecks. {{}} +* Allows the creation of new shared relations during YSQL upgrade to have a global impact by incrementing the catalog version across every database. {{}} +* Allows resetting of stats collected by the ANALYZE command, including `reltuples`, `pg_statistic` rows, and `pg_statistic_ext` values. {{}} +* Corrects the YbGetOrdinaryColumnsNeedingPgRecheck function to return table column numbers instead of index numbers, preventing unnecessary data fetches and potential crashes or errors after dropping a column. {{}} +* Resolves a detected deadlock during ALTER TABLE operations, enhancing test stability. 
{{}} +* Reduces unexpected log messages by not invoking `YsqlDdlTxnCompleteCallback` if all table 'pb_txn_id's in the DDL transaction verifier state are already cleared, avoiding potential deadlock situations in DDL atomicity. {{}} +* Reduces prefix length in the index when using distinct index scan with included columns. {{}} +* Returns more accurate results when running EXPLAIN command by fixing relids of prefix keys under a subquery distinct index scan. {{}} +* Reduces sequence cache collision by incorporating both database and sequence OIDs as the entry key. {{}} +* Prevents a crash related to memory release associated with TupleTableSlots in SubPlans during a Values Scan. {{}} +* Allows faster data inserts into tables with identity columns. {{}} +* Enhances log output by adding missing newlines in the `yb_pclose_check` function and corrects memory allocation. {{}} +* Streamlines the "drop column" operation process, preventing hindrance even if the alter schema RPC is missed. {{}} +* Reduces ASAN/TSAN builds' pressure on t-server/master and avoids timeout issues by using less parallelism. {{}} +* Eliminates unnecessary waiting for concurrent transactions in the DEFERRABLE mode for READ ONLY serializable transactions. {{}} +* Fixes TServer crash when pushing down certain SAOP operations like `string_to_array`. {{}} + +#### YCQL + +* Allows the deletion of the Cassandra role in YCQLsh without it regenerating upon cluster restart, by adding a flag to mark if the role was previously created. {{}} +* Removes extra reads during the processing of `INSERT INTO ...RETURNS STATUS AS ROW` for CQL tables with a specific primary key, improving system load and efficiency. {{}} +* Now ensures simultaneous registration of new split tablet children, fully covering the keyspace during splits. 
{{}} + +#### DocDB + +* Fixes hidden split parent tablets wrongly appearing as leaderless in the master's leaderless tablet endpoint, ensuring accurate load balance status and preventing potential issues with Point-in-Time Recovery (PITR) operations. {{}} +* Ensures failed xCluster setup if the xCluster stream state update to `ACTIVE` does not occur. {{}} +* Fix ensures heartbeat processing doesn't blindly overwrite tablet replica state, avoiding potential data corruption. {{}} +* Fixes crash when parsing an invalid timestamp in LTO build by updating C++ dependencies and error handling. {{}} +* Removes unnecessary flush during snapshot deletion, preventing write blocks. {{}} +* Eliminates possible deadlock during setup replication by fixing the order in which locks are acquired. {{}} +* Fixes issue of scans not honoring timeouts, preventing indefinite reads and reducing CPU usage. {{}} +* Allows restoring a snapshot schedule from a time just before the oldest snapshot, improving usability. {{}} +* Removes assumption that every tablet server hosts tablets, preventing potential crashes. {{}} +* Resolves a heartbeat metrics issue ensuring full xCluster error information is sent to the new master even during a leader failover, and makes `tserver_heartbeat_metrics_interval_ms` runtime updatable. {{}} +* Adds validation to RPCs `DeleteSnapshot` and `RestoreSnapshot` to prevent deletion or use of snapshots involved in ongoing processes. {{}} +* Ensures `Create Table` operation fails if `Alter Replication` encounters an error, enhancing the reliability of replication setup. {{}} +* Converted the `ysql_skip_row_lock_for_update` to an auto-flag to resolve compatibility issues during upgrade, preventing incorrect DB record creations that can affect row visibility and integrity. {{}} +* Modifies memory consumption calculations for pending operations to ensure accurate rejection of new writes at bootstrap, preventing loading failures. 
{{}} +* Trims large error messages in AsyncRpc::Failed to prevent hitting memory limit and resulting unavailability. {{}} +* Excludes hidden tables from `generate snapshot` output to circumvent cloning failure from recreated tables. {{}} +* Prevents `unexpected leader` fatal errors by updating cached leader terms immediately after a leader change. {{}} +* Renames and updates the description of the flag `min_segment_size_to_rollover_at_flush` for clarity. {{}} +* Changes the class of `enable_automatic_tablet_splitting` flag from `kLocalPersisted` (class 2) to `kExternal` (class 4) to eliminate setup issues with xCluster configurations. {{}} +* Switches from using scoped_refptr to std::shared_ptr for TabletInfo to handle cycles safely. {{}},{{}} +* Updates cotable IDs in flushed frontiers during a snapshot restore, preventing potential post-restore issues. {{}} +* Allows the persistent mapping of source-target schema versions when a new table is added to a colocated database, thus preventing replication from stalling after T-server restarts. {{}} +* Eliminates potential FATAL errors during reported tabletPB creation by ensuring retrieval of schema version is atomic. {{}} +* Ensures the correct order of destroying components, preventing possible concurrent calls on a WAL append callback. {{}} +* Adds a TSAN suppression to manage the apparent race condition in the function `boost::regex_match`. {{}} +* Fixes the compilation error for almalinux8 fastdebug gcc11 that was previously removed from the build matrix. {{}} +* Corrects a bug causing some tablet metrics to display incorrect `metric_type` attribute. {{}} +* Fixes a segmentation fault in yb-master by checking for a null pointer before dereferencing it, addressing an issue in the CDC run on `2.23.0.0-b37-arm`. {{}} +* Reduces unnecessary logging during checkpoint operations by lowering INFO level logs to DEBUG_LEVEL, enhancing log readability. 
{{}} +* Allows DML operations on non-replicated databases and blocks DML only on databases in transactional xCluster replication STANDBY mode. Now only databases part of an inbound transactional xCluster replication group in the xCluster safe time map will have DML operations blocked. Also, certain attributes are moved from TServer to TserverXClusterContext. {{}} +* Enables the session to outlive the callback by holding a shared pointer to it, preventing potential crashes during concurrent DML queries. {{}} +* Avoids multiple destruction of the same database connection, preventing system crashes due to simultaneous connection failures. {{}} +* Allows viewing of the RPC bind addresses in the master leader UI, especially beneficial in cases like k8s where the RPC bind address with the pod DNS is more useful than the broadcast address. {{}} +* Prevents fatal errors by skipping ReserveMarker/AsyncAppend if the tablet peer has already been shut down. {{}} +* Prevents yb-master crash by ensuring background task isn't deleted before the callback is invoked. {{}} +* Enables callback completion wait in PollTransactionStatusBase during shutdown to prevent unexpected process termination. {{}} +* Initializes `prev_op` to `UNKNOWN` to prevent AlmaLinux 8 fastdebug gcc11 compilation failures. {{}} +* Enables batched metric updates for YCQL reads to prevent performance drop due to RocksDB metric updates. {{}} +* Removes pending delete logic from load balancer to prevent delays during high tablet replica movement. {{}} +* Enhances YSQL operation by refining task shutdown procedures and avoiding unnecessary task aborts. {{}} +* Stops fatal errors caused by the re-use of remote log anchor session during remote bootstrap from a non-leader peer. This fix ensures shared pointers are accurately tracked for `tablet_peer` objects using the `=` operator, preventing unintentional destruction of underlying objects. 
{{}} +* Delays `min_running_ht` initialization until after the successful completion of tablet bootstrap to prevent unexpected behaviors. {{}} +* Enables the `skip_table_tombstone_check` for colocated tables to prevent errors. {{}} +* Prevents potential segfaults during catalog reload by modifying `GetClusterConfig` function. {{}} +* Reduces the interval of the tablet server metrics heartbeat to prevent potential misreporting of a leaderless tablet. {{}} +* Reduces four threads and a meta cache in xCluster consumer by reusing the TServers yb::client. {{}} +* Resolves the issue of `pg_locks` query failure due to missing host node UUID in distributed transactions. {{}} +* Clarifies memory division flags to reflect they are percentage of the process's hard memory limit, not total available memory. {{}} +* Eliminates latency spikes in conflicting workloads by preventing redundant ProbeTransactionDeadlock RPCs. {{}} +* Corrects the CI build issues on GCC 12, debug AlmaLinux 9 caused by updates in cf0c09b. {{}} +* Captures the actual user executing the query instead of only the superuser and fixes ordering in the ddl_queue handler. {{}} +* Enhances logging during MemTable flushing to better monitor memory usage limits. {{}} +* Prevents premature metric destruction during Prometheus scrapes, resolving non-UTF8 character issues. {{}} +* Ensures object drops correctly cascade to dependent columns in the DocDB table, preventing inconsistencies. {{}} +* Boosts YSQL major version upgrade process by minimal changes in the master branch to reduce its divergence. {{}} +* Adds a flag to disable the intent filtering during bootstrap, preventing potential data corruption on restart. {{}} +* Allows large bytes to be requested on RateLimiter, preventing indefinite call stalling. {{}} +* Restores the previously missing home icon in the master user interface. {{}} +* Removes the `read-time` option from the ysql_dump help output for proper DDL atomicity handling. 
{{}} +* Allows setting custom snapshot retention duration using `yb-admin`, including retaining a snapshot indefinitely by setting `retention_duration_hours` to 0. {{}} +* Removes an unreachable line causing GH build failure. {{}} +* Corrects an issue where the load balancer improperly handles a pending leader stepdown task. {{}} +* Removes the TServer warning log for mismatching cluster config versions, reducing unnecessary noise in logs. {{}} +* Eliminates memory leaks in YSQL Connection Manager by ensuring proper deallocation of objects and variables. {{}} +* Resolves the TServer crash issue during query execution happening due to a NULL pointer dereference. {{}} +* Allows for a reliable connection to a remote YugabyteDB universe by setting `skip_master_flagfile` when creating YBClients. {{}} +* Ensures the node restarts properly even with `secure` mode enabled by adding a timeout framework. {{}} +* Ensures the `stack_is_too_deep` function returns predictable results in ASAN, aiding effective limit setting on stack depth. {{}} +* Introduces a check for multi-threaded mode in catalog lookup functions to avoid server crashes and make error mitigation easier for users. {{}} +* Enhances visibility of the `Hidden` state in Master/Tserver Tables UI by shifting its position more prominently to the `State` column. {{}} +* Increases the speed of the `yb-admin snapshot schedule create` command to reduce resource usage. {{}} +* Deprecates the TServer flag `enable_pg_savepoints` to prevent incorrect behavior and avoid silent progress in PL/pgSQL exceptions. {{}} +* Eliminates the occurrence of "schema version mismatch" error following a DROP INDEX statement by introducing a delay in index deletion. {{}} + +#### CDC + +* Ensures deletion of MemoryContext after each GetChanges RPC to prevent memory leaks. {{}} +* Introduces additional VLOG statements in the ListReplicationSlots function for better debugging. 
{{}} +* Prevents newly created indexes, materialized views, and non-user tables from being added to the Change Data Capture (CDC) stream metadata. {{}} +* Reduces resource usage by removing non-eligible tables, like indexes, from existing CDC SDK stream metadata, and releasing retention barriers. This change requires the master flag `enable_cleanup_of_non_eligible_tables_from_cdcsdk_stream` and limits processing to two non-eligible tables per namespace per run with `cdcsdk_table_processing_limit_per_run`. Introduces three yb-admin commands for managing CDC streams. {{}},{{}},{{}} +* Introduces new auto flag `cdcsdk_enable_identification_of_non_eligible_tables` and three yb-admin commands to manage tables in CDC stream, enhancing control and reducing unnecessary resource usage. {{}},{{}},{{}} +* Fixes the issue of getting either `0` or a random time as the server's system clock in XLogData from the logical replication stream. {{}} +* Fixes a memory leakage issue in the walsender process by deep freeing the cached record batch after streaming to the client. {{}} +* Adds more debug logs in the walsender to aid in investigating issues like linked data loss. {{}} +* Allows for better memory management in the walsender process by storing record batches in a separate memory context. {{}} +* Logs RPC errors as warnings during the cleanup of virtual WAL after LogicalReplication ends. {{}} +* Allows stream replication to handle serialized transactions successfully by adding directory creation logic. {{}} +* Stops loading replication slots from disk during startup to avoid potential system crashes. {{}} +* Adds more logs for easy debugging during stress runs for Change Data Capture (CDC) without any impact on performance. {{}} +* Limits unnecessary RPC calls to the local TServer during RollbackToSubTransaction operation if transaction is read-only, a fast-path transaction, or has NON_TRANSACTIONAL isolation level. 
{{}} +* Limits transactions' inclusion in the unacked list only upon receiving the commit record, enhancing the restart_lsn calculation. {{}} +* Removes table level attributes from CDCSDK metrics to avoid TServer crash due to failed DCHECK assertion. {{}} +* Fixes the segmentation fault in walsender for dynamic table addition by refreshing stored replica identities and preventing a race condition when creating dynamic tables. {{}} +* Updates the serialization and de-serialization logic to include the yb_is_omitted array, preserving data values in large transactions. {{}} +* Solves an issue where CDCSDK incorrectly deduces tablets as not interesting for stream before reaching the configured time limit. {{}} +* Addresses a race condition in dynamic table creation, enhancing stability during table and tablet initialization. {{}} +* Refines the logic to remove `BEGIN` record when no DML records are added, preventing potential virtual WAL crashes. {{}} +* Resolves "could not open relation" error by updating slot creation method and simplifying yb_read_time logic. {{}} +* Enables support for dynamically allotted OID data types in CDC to prevent system crashes. {{}} +* Allows handling of non-eligible table cleanup in CDC stream loading even after table drop, preventing master crash. {{}} +* Reduces total inserts from 5k/thread to 2.5k/thread for clearer consumption of expected records. {{}} +* Prevents failures in decoding change events by refreshing `cached_schema_details` when executing a new `GetChanges` request if the client indicates a necessity for the schema. {{}} +* Allows pg_replication_slots to return an empty response instead of an error when `ysql_yb_enable_replication_commands` flag is false. {{}} + +
diff --git a/docs/content/preview/releases/ybdb-releases/v2024.1.md b/docs/content/preview/releases/ybdb-releases/v2024.1.md index 902c97cdbdd1..11a1828d3454 100644 --- a/docs/content/preview/releases/ybdb-releases/v2024.1.md +++ b/docs/content/preview/releases/ybdb-releases/v2024.1.md @@ -7,7 +7,7 @@ menu: preview_releases: identifier: v2024.1 parent: ybdb-releases - weight: 2811 + weight: 2810 rightNav: hideH4: true type: docs @@ -70,15 +70,15 @@ docker pull yugabytedb/yugabyte:2024.1.2.0-b77 ### New features -* [Semi-automatic transactional xCluster setup](/stable/deploy/multi-dc/async-replication/async-replication-transactional/). Provides operationally simpler setup and management of YSQL transactional xCluster replication, as well as simpler steps for performing DDL changes. +* [Semi-automatic transactional xCluster setup](/stable/deploy/multi-dc/async-replication/async-replication-transactional/). Provides operationally simpler setup and management of YSQL transactional xCluster replication, as well as simpler steps for performing DDL changes. {{}} * yugabyted * [Voyager assessment visualisation in yugabyted UI](/preview/yugabyte-voyager/migrate/assess-migration/#visualize-the-migration-assessment-report). Yugabyted UI provides a dashboard to allow the users to effectively plan the migrations based on the complexity and also be able to monitor the progress of each migration - * [Backup/restore support with TLS enabled](/stable/reference/configuration/yugabyted/#backup). In secure mode, yugabyted cluster supports taking full backup/restores. + * [Backup/restore support with TLS enabled](/stable/reference/configuration/yugabyted/#backup). In secure mode, yugabyted cluster supports taking full backup/restores. {{}} - * [xCluster support](/stable/reference/configuration/yugabyted/#set-up-xcluster-replication-between-clusters). yugabyted enables native support for setting up xCluster between two yugabyted deployed clusters. 
+ * [xCluster support](/stable/reference/configuration/yugabyted/#set-up-xcluster-replication-between-clusters). yugabyted enables native support for setting up xCluster between two yugabyted deployed clusters. {{}} ### Change log diff --git a/docs/content/preview/yugabyte-platform/install-yugabyte-platform/migrate-replicated.md b/docs/content/preview/yugabyte-platform/install-yugabyte-platform/migrate-replicated.md index 968dc0267c30..f51777868b33 100644 --- a/docs/content/preview/yugabyte-platform/install-yugabyte-platform/migrate-replicated.md +++ b/docs/content/preview/yugabyte-platform/install-yugabyte-platform/migrate-replicated.md @@ -18,12 +18,12 @@ YugabyteDB Anywhere (YBA) will end support for Replicated installation at the en - Review the [prerequisites](../../prepare/). - YBA Installer can perform the migration in place. Make sure you have enough disk space on your current machine for both the Replicated and YBA Installer installations. - If your Replicated installation is v2.18.5 or earlier, or v2.20.0, [upgrade your installation](../../upgrade/upgrade-yp-replicated/) to v2.20.1.3 or later. -- If you haven't already, [download and extract](../install-software/installer/#download-yba-installer) YBA Installer. It is recommended that you migrate using the same version of YBA Installer as the version of YBA you are running in Replicated. For example, if you have v.{{}} installed, use the following commands: +- If you haven't already, [download and extract](../install-software/installer/#download-yba-installer) YBA Installer. It is recommended that you migrate using the same version of YBA Installer as the version of YBA you are running in Replicated. 
For example, if you have v.{{}} installed, use the following commands: ```sh - $ wget https://downloads.yugabyte.com/releases/{{}}/yba_installer_full-{{}}-linux-x86_64.tar.gz - $ tar -xf yba_installer_full-{{}}-linux-x86_64.tar.gz - $ cd yba_installer_full-{{}}/ + $ wget https://downloads.yugabyte.com/releases/{{}}/yba_installer_full-{{}}-linux-x86_64.tar.gz + $ tar -xf yba_installer_full-{{}}-linux-x86_64.tar.gz + $ cd yba_installer_full-{{}}/ ``` ## Migrate a Replicated installation diff --git a/docs/data/currentVersions.json b/docs/data/currentVersions.json index d5169a44fe47..fea50b7584eb 100644 --- a/docs/data/currentVersions.json +++ b/docs/data/currentVersions.json @@ -2,14 +2,14 @@ "dbVersions": [ { - "series": "v2.21", + "series": "v2.23", "alias": "preview", - "display": "v2.21 (Preview)", - "version": "2.21.1.0", - "versionShort": "2.21.1", - "appVersion": "2.21.1.0-b271", + "display": "v2.23 (Preview)", + "version": "2.23.0.0", + "versionShort": "2.23.0", + "appVersion": "2.23.0.0-b711", "isStable": false, - "initialRelease": "2024-03-26" + "initialRelease": "2024-09-13" }, { "series": "v2024.1", From e9ec9e2bfc7068bd7f018afa3ba6aa7d83dc4fb0 Mon Sep 17 00:00:00 2001 From: Mikhail Bautin Date: Fri, 13 Sep 2024 12:24:41 -0700 Subject: [PATCH 13/75] [#23846] Support for automatically syncing inline third-party dependencies Summary: Update thirdparty_tool to allow automatically syncing up the inline (currently header-only) third-party dependencies stored in src/inline-thirdparty. We only copy separate header subdirectories of those upstream repositories into our codebase. This light-weight process avoids going through the regular third-party dependency build and release loop. The inline third-party dependencies are described by the new configuration file build-support/inline_thirdparty.yml. Also refactoring thirdparty_tool into separate modules. 
Test Plan: bin/run_codecheck bin/thirdparty_tool --sync-inline-thirdparty Regression testing of thirdparty_tool: bin/thirdparty_tool --update Jenkins: compile only Reviewers: steve.varnau Reviewed By: steve.varnau Subscribers: ybase Differential Revision: https://phorge.dev.yugabyte.com/D37916 --- build-support/inline_thirdparty.yml | 39 + python/yugabyte/file_util.py | 4 + python/yugabyte/git_util.py | 67 ++ python/yugabyte/inline_thirdparty.py | 246 +++++ python/yugabyte/string_util.py | 15 +- .../yugabyte/thirdparty_archives_metadata.py | 338 +++++++ python/yugabyte/thirdparty_releases.py | 261 ++++++ python/yugabyte/thirdparty_tool.py | 840 +----------------- python/yugabyte/thirdparty_tool_impl.py | 300 +++++++ src/inline-thirdparty/README.md | 36 +- 10 files changed, 1308 insertions(+), 838 deletions(-) create mode 100644 build-support/inline_thirdparty.yml create mode 100644 python/yugabyte/inline_thirdparty.py create mode 100644 python/yugabyte/thirdparty_archives_metadata.py create mode 100644 python/yugabyte/thirdparty_releases.py create mode 100644 python/yugabyte/thirdparty_tool_impl.py diff --git a/build-support/inline_thirdparty.yml b/build-support/inline_thirdparty.yml new file mode 100644 index 000000000000..bd5e7a61fe4f --- /dev/null +++ b/build-support/inline_thirdparty.yml @@ -0,0 +1,39 @@ +# Copyright (c) YugabyteDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except +# in compliance with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software distributed under the License +# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express +# or implied. See the License for the specific language governing permissions and limitations +# under the License. +# + +# See the README.md file in src/inline-thirdparty for more information. 
+ +dependencies: + - name: usearch + git_url: https://github.com/unum-cloud/usearch + commit: 4fbb56e02aa928a011abdedb66adfef128123e5f + src_dir: include + dest_dir: usearch + + - name: fp16 + git_url: https://github.com/Maratyszcza/FP16/ + commit: 98b0a46bce017382a6351a19577ec43a715b6835 + src_dir: include + dest_dir: fp16 + + - name: hnswlib + git_url: https://github.com/nmslib/hnswlib + commit: 2142dc6f4dd08e64ab727a7bbd93be7f732e80b0 + src_dir: hnswlib + dest_dir: hnswlib/hnswlib + + - name: simsimd + git_url: https://github.com/ashvardanian/simsimd + src_dir: include + dest_dir: simsimd + tag: v5.1.0 diff --git a/python/yugabyte/file_util.py b/python/yugabyte/file_util.py index b6a29e26ca06..256c6ef04332 100644 --- a/python/yugabyte/file_util.py +++ b/python/yugabyte/file_util.py @@ -63,6 +63,10 @@ def read_file(file_path: Union[str, pathlib.Path]) -> str: def write_file( content: Union[str, List[str]], output_file_path: Union[str, pathlib.Path]) -> None: + if '\n' in str(output_file_path): + raise ValueError( + "Output file path cannot contain newlines. It is possible that file content and path " + f"were reversed accidentally. Content: {content}, output_file_path: {output_file_path}") if isinstance(content, list): content = '\n'.join(content) + '\n' with open(path_to_str(output_file_path), 'w') as output_file: diff --git a/python/yugabyte/git_util.py b/python/yugabyte/git_util.py index 3bd599726f36..33793e2fc0bf 100644 --- a/python/yugabyte/git_util.py +++ b/python/yugabyte/git_util.py @@ -10,7 +10,74 @@ # or implied. See the License for the specific language governing permissions and limitations # under the License. 
+import logging +import os import re +import subprocess + +from typing import Optional + +from yugabyte.file_util import read_file SHA1_RE = re.compile(r'^[0-9a-f]{40}$') + + +def is_valid_git_sha(commit: str) -> bool: + return SHA1_RE.match(commit) is not None + + +def validate_git_commit(commit: str) -> str: + commit = commit.strip().lower() + if not is_valid_git_sha(commit): + raise ValueError(f"Invalid Git commit SHA1: {commit}") + return commit + + +def get_github_token(token_file_path: Optional[str]) -> Optional[str]: + github_token: Optional[str] + if token_file_path: + logging.info("Reading GitHub token from %s", token_file_path) + github_token = read_file(token_file_path).strip() + else: + github_token = os.getenv('GITHUB_TOKEN') + if github_token is None: + return github_token + + if len(github_token) != 40: + raise ValueError(f"Invalid GitHub token length: {len(github_token)}, expected 40.") + return github_token + + +def is_git_clean(repo_dir: str) -> bool: + # Check for uncommitted changes (staged or unstaged) + result = subprocess.run(['git', 'status', '--porcelain'], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + text=True, + cwd=repo_dir, + check=True) + + # If the result is an empty string, the working directory is clean + return result.stdout.strip() == '' + + +def get_latest_commit_in_subdir(repo_dir: str, subdir: str) -> str: + """ + Get the latest commit that affected a particular subdirectory. 
+ """ + assert not os.path.isabs(subdir), \ + f"Subdirectory must be a relative path, not an absolute path: {subdir}" + result = subprocess.run( + ['git', 'log', '-n', '1', '--pretty=format:%H', '--', subdir], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + text=True, + cwd=repo_dir, + check=True + ) + commit_sha = result.stdout.strip() + if not commit_sha: + raise ValueError(f"No commits found for subdirectory: {subdir}") + validate_git_commit(commit_sha) + return commit_sha diff --git a/python/yugabyte/inline_thirdparty.py b/python/yugabyte/inline_thirdparty.py new file mode 100644 index 000000000000..63e494eec0ea --- /dev/null +++ b/python/yugabyte/inline_thirdparty.py @@ -0,0 +1,246 @@ +# Copyright (c) YugabyteDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except +# in compliance with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software distributed under the License +# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express +# or implied. See the License for the specific language governing permissions and limitations +# under the License. +# + +# Manages header-only third-party dependencies in src/inline-thirdparty. These dependencies are +# copied from the relevant subdirectories of upstream repositories and typically represent only a +# small portion of the upstream repository. 
+ +import logging +import os +import shutil +import subprocess +import tempfile + +from pathlib import Path +from typing import Optional, List, Set +from dataclasses import dataclass + +import ruamel.yaml + +from yugabyte import common_util, file_util, git_util + + +INLINE_THIRDPARTY_CONFIG_REL_PATH = 'build-support/inline_thirdparty.yml' +INLINE_THIRDPARTY_CONFIG_PATH = os.path.join( + common_util.YB_SRC_ROOT, INLINE_THIRDPARTY_CONFIG_REL_PATH) +INLINE_THIRDPARTY_SRC_DIR = os.path.join(common_util.YB_SRC_ROOT, 'src', 'inline-thirdparty') + +FILE_EXTENSIONS_SUPPORTING_CPP_COMMENTS = ('.c', '.cc', '.cpp', '.h', '.hpp', '.modulemap') + + +ruamel_yaml_object = ruamel.yaml.YAML() + + +@dataclass +class InlineDependency: + name: str + git_url: str + src_dir: str + dest_dir: str + tag: Optional[str] = None + commit: Optional[str] = None + + def validate_tag_or_commit_choice(self) -> None: + if self.tag and self.commit: + raise ValueError(f"Only one of tag or commit can be specified: {self}") + if not self.tag and not self.commit: + raise ValueError(f"One of tag or commit must be specified: {self}") + + @property + def tag_or_commit_description(self) -> str: + self.validate_tag_or_commit_choice() + if self.tag: + return f"tag {self.tag}" + if self.commit: + return f"commit {self.commit}" + raise ValueError(f"Should not happen: {self}") + + @property + def tag_or_commit(self) -> str: + self.validate_tag_or_commit_choice() + result = self.tag or self.commit + assert result is not None + return result + + def get_github_commits_url(self, resolved_commit: str) -> str: + return self.git_url + '/commits/' + resolved_commit + + +@dataclass +class DependenciesConfig: + dependencies: List[InlineDependency] + + +def read_yaml(file_path: str) -> DependenciesConfig: + """Reads the YAML file and maps it to DependenciesConfig.""" + with open(file_path) as file: + data = ruamel_yaml_object.load(file) + dependencies = [InlineDependency(**dep) for dep in data['dependencies']] + return 
DependenciesConfig(dependencies=dependencies) + + +def get_latest_commit_explanation( + dep: InlineDependency, + latest_commit_in_subdir: str, + cpp_comment: bool = False) -> str: + return ( + f"Latest commit in the {dep.src_dir} subdirectory of the {dep.name} repository:\n" + + ("// " if cpp_comment else "") + + latest_commit_in_subdir + ) + + +def add_comment_to_file( + file_path: str, + dep: InlineDependency, + latest_commit_in_subdir: str) -> None: + """Adds a comment to the include file indicating what version of the dependcy is being used.""" + if not file_path.endswith(FILE_EXTENSIONS_SUPPORTING_CPP_COMMENTS): + logging.info("Cannot add comment to file %s", file_path) + return + if os.path.islink(file_path): + logging.info("Cannot add comment to symlink %s", file_path) + return + + content = file_util.read_file(file_path) + comment = "\n".join([ + f"// This file is part of the {dep.name} inline third-party dependency of YugabyteDB.", + f"// Git repo: {dep.git_url}", + f"// Git tag: {dep.tag}" if dep.tag else f"// Git commit: {dep.commit}", + f"// {get_latest_commit_explanation(dep, latest_commit_in_subdir, cpp_comment=True)}", + "//", + "// See also src/inline-thirdparty/README.md.", + ]) + file_util.write_file(comment + '\n\n' + content, file_path) + + +def validate_dir(dep: InlineDependency, dir_type: str) -> None: + dir_value = getattr(dep, dir_type) + if not dir_value: + raise ValueError(f"{dir_type} is required for {dep.name}") + + if os.path.isabs(dir_value): + raise ValueError(f"{dir_type} must be a relative path for {dep.name}") + + if not dep.git_url.startswith('https://github.com/'): + raise ValueError(f"git_url must be a GitHub URL for {dep.name}") + + +def validate_config(config: DependenciesConfig) -> None: + """Validates the config.""" + names_seen: Set[str] = set() + for dep in config.dependencies: + if dep.name in names_seen: + raise ValueError(f"Duplicate name {dep.name}") + names_seen.add(dep.name) + + if not dep.git_url: + raise 
ValueError(f"git_url is required for {dep.name}") + + validate_dir(dep, "src_dir") + validate_dir(dep, "dest_dir") + if dep.dest_dir != dep.name and not dep.dest_dir.startswith(dep.name + '/'): + raise ValueError( + f"dest_dir must be the same as dependency name or have the dependency name as " + f"its first relative path component for {dep.name}: {dep}") + dep.validate_tag_or_commit_choice() + + +def clone_and_copy_subtrees(dependencies: List[InlineDependency]) -> None: + """Clones repositories into a temporary directory and copies the subtrees.""" + src_root = Path(INLINE_THIRDPARTY_SRC_DIR) + + with tempfile.TemporaryDirectory() as temp_dir: + temp_path = Path(temp_dir) + + for dep in dependencies: + repo_dir = temp_path / dep.name + logging.info(f"Cloning {dep.name} into {repo_dir}") + + # Clone the repository into the temp directory + subprocess.check_call(['git', 'clone', dep.git_url, str(repo_dir)]) + + # Checkout the specified tag or commit + subprocess.check_call(['git', 'checkout', dep.tag_or_commit], cwd=repo_dir) + + resolved_commit = git_util.get_latest_commit_in_subdir(str(repo_dir), subdir='.') + if dep.commit and resolved_commit != dep.commit: + raise ValueError( + f"Expected resolved commit {resolved_commit} to match configured commit " + f"{dep.commit} for dependency {dep.name}") + + # Define source and destination directories + src_subtree = repo_dir / dep.src_dir + dest_subtree = src_root / dep.dest_dir + + logging.info("Copying subtree from {} to {}".format(src_subtree, dest_subtree)) + + # Ensure the destination directory exists + dest_subtree.parent.mkdir(parents=True, exist_ok=True) + + # Remove the current content in the destination directory. We remove the entire + # top-level directory under inline-thirdparty, even though dest_dir could contain + # multiple path components. 
+ subtree_to_remove = src_root / Path(dep.dest_dir).parts[0] + if subtree_to_remove.exists(): + logging.info(f"Deleting existing directory {subtree_to_remove}") + shutil.rmtree(subtree_to_remove) + + # Copy the subtree to the destination directory + shutil.copytree(src_subtree, dest_subtree) + + latest_commit_in_subdir = git_util.get_latest_commit_in_subdir( + str(repo_dir), dep.src_dir) + for root, dirs_unused, files in os.walk(dest_subtree): + for file in files: + file_path = os.path.join(root, file) + add_comment_to_file(file_path, dep, latest_commit_in_subdir) + + # Commit the changes in the current repository + make_commit(dep, latest_commit_in_subdir, resolved_commit) + + +def make_commit( + dep: InlineDependency, latest_commit_in_subdir: str, resolved_commit: str) -> None: + """Creates a descriptive commit in the main YugabyteDB repo for the updated dependency.""" + git_util.validate_git_commit(latest_commit_in_subdir) + git_util.validate_git_commit(resolved_commit) + + if git_util.is_git_clean(common_util.YB_SRC_ROOT): + logging.info(f"No changes were made to the {dep.name} dependency, nothing to commit.") + return + + commit_message_lines = [ + "Automatic commit by thirdparty_tool: " + + f"update {dep.name} to {dep.tag_or_commit_description}.", + "", + f"Used commit of the {dep.name} repository: {dep.get_github_commits_url(resolved_commit)}", + ] + if latest_commit_in_subdir != resolved_commit: + commit_message_lines.extend([ + "", + get_latest_commit_explanation(dep, latest_commit_in_subdir) + ]) + + commit_message = "\n".join(commit_message_lines) + subprocess.check_call(['git', 'add', '.'], cwd=INLINE_THIRDPARTY_SRC_DIR) + subprocess.check_call(['git', 'commit', '-m', commit_message], cwd=INLINE_THIRDPARTY_SRC_DIR) + logging.info(f"Created an automatic commit for {dep.name}") + + +def sync_inline_thirdparty() -> None: + config = read_yaml(INLINE_THIRDPARTY_CONFIG_PATH) + validate_config(config) + if not git_util.is_git_clean(common_util.YB_SRC_ROOT): + 
raise RuntimeError(f"Local changes exist, cannot update inline third-party dependencies.") + clone_and_copy_subtrees(config.dependencies) diff --git a/python/yugabyte/string_util.py b/python/yugabyte/string_util.py index 422c05a538de..6ff79222fe87 100644 --- a/python/yugabyte/string_util.py +++ b/python/yugabyte/string_util.py @@ -11,7 +11,7 @@ # under the License. import hashlib -from typing import Union +from typing import Union, Optional, Any def encode_if_needed(s: Union[bytes, str]) -> bytes: @@ -22,3 +22,16 @@ def encode_if_needed(s: Union[bytes, str]) -> bytes: def compute_sha256(s: Union[bytes, str]) -> str: return hashlib.sha256(encode_if_needed(s)).hexdigest() + + +def none_to_empty_string(x: Optional[Any]) -> Any: + if x is None: + return '' + return x + + +def matches_maybe_empty(a: Optional[str], b: Optional[str]) -> bool: + """ + Returns True if a or b are equal, but treating all None values as empty strings. + """ + return (a or '') == (b or '') diff --git a/python/yugabyte/thirdparty_archives_metadata.py b/python/yugabyte/thirdparty_archives_metadata.py new file mode 100644 index 000000000000..bd70609e1586 --- /dev/null +++ b/python/yugabyte/thirdparty_archives_metadata.py @@ -0,0 +1,338 @@ +# Copyright (c) YugabyteDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except +# in compliance with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software distributed under the License +# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express +# or implied. See the License for the specific language governing permissions and limitations +# under the License. +# + +# Tools for manipulating the thirdparty_archives.yml file. 
+ +import logging +import os +import pprint +import re +import time + +from collections import defaultdict +from io import StringIO +from re import Pattern +from typing import Set, Dict, Any, Optional, List, Union, DefaultDict, Tuple + +import ruamel.yaml + +from github import Github, GithubException + +from yugabyte.thirdparty_releases import ( + DOWNLOAD_URL_PREFIX, + get_archive_name_from_tag, + ThirdPartyReleaseBase, +) +from yugabyte import common_util +from yugabyte.common_util import ( + YB_SRC_ROOT, + load_yaml_file, +) +from yugabyte.file_util import read_file +from yugabyte.git_util import get_github_token, is_valid_git_sha +from yugabyte.thirdparty_releases import ( + ReleaseGroup, + GitHubThirdPartyRelease, + SkipThirdPartyReleaseException, +) + + +ruamel_yaml_object = ruamel.yaml.YAML() + + +# A plain Python data structure nested type for the thirdparty_archives.yml file structure. +ThirdPartyArchivesYAML = Dict[str, Union[str, List[Dict[str, str]]]] + + +THIRDPARTY_ARCHIVES_REL_PATH = os.path.join('build-support', 'thirdparty_archives.yml') +MANUAL_THIRDPARTY_ARCHIVES_REL_PATH = os.path.join( + 'build-support', 'thirdparty_archives_manual.yml') + +# Skip these problematic tags. +BROKEN_TAGS = set(['v20210907234210-47a70bc7dc-centos7-x86_64-linuxbrew-gcc5']) + +# We will store the SHA1 to be used for the local third-party checkout, as well as to use by default +# for individual archives unless it is overridden, under this top-level key in the third-party +# archive metaadata YAML files. Furthermore, for each third-party release, we will store the SHA +# of the commit that corresponds to the release tag under the same key, unless it is different from +# the default value stored at the top level. +SHA_KEY = 'sha' + +# The top-level key under which we store metadata for third-party archives. 
+ARCHIVES_KEY = 'archives' + + +@ruamel_yaml_object.register_class +class MetadataItem(ThirdPartyReleaseBase): + """ + A metadata item for a third-party download archive loaded from the thirdparty_archives.yml + file. + """ + + def __init__(self, yaml_data: Dict[str, Any]) -> None: + processed_field_names: Set[str] = set() + for field_name in MetadataItem.KEY_FIELDS_WITH_TAG: + field_value = yaml_data.get(field_name) + if field_value is None: + field_value = ThirdPartyReleaseBase.get_default_field_value(field_name) + setattr(self, field_name, field_value) + processed_field_names.add(field_name) + unknown_fields = set(yaml_data.keys() - processed_field_names) + if unknown_fields: + raise ValueError( + "Unknown fields found in third-party metadata YAML file: %s. " + "Entire item: %s" % (sorted(unknown_fields), pprint.pformat(yaml_data))) + + def url(self) -> str: + return f'{DOWNLOAD_URL_PREFIX}{self.tag}/{get_archive_name_from_tag(self.tag)}' + + +def get_archive_metadata_file_path() -> str: + return os.path.join(YB_SRC_ROOT, THIRDPARTY_ARCHIVES_REL_PATH) + + +def get_manual_archive_metadata_file_path() -> str: + return os.path.join(YB_SRC_ROOT, MANUAL_THIRDPARTY_ARCHIVES_REL_PATH) + + +class MetadataUpdater: + github_token_file_path: str + tag_filter_pattern: Optional[Pattern] + also_use_commits: List[str] + archive_metadata_path: str + override_default_sha: Optional[str] + + def __init__( + self, + github_token_file_path: str, + tag_filter_regex_str: Optional[str], + also_use_commits: List[str], + override_default_sha: Optional[str]) -> None: + self.github_token_file_path = github_token_file_path + if tag_filter_regex_str: + self.tag_filter_pattern = re.compile(tag_filter_regex_str) + else: + self.tag_filter_pattern = None + self.also_use_commits = also_use_commits + self.archive_metadata_path = get_archive_metadata_file_path() + self.override_default_sha = override_default_sha + + def update_archive_metadata_file(self) -> None: + yb_version = 
read_file(os.path.join(YB_SRC_ROOT, 'version.txt')).strip() + + logging.info(f"Updating third-party archive metadata file in {self.archive_metadata_path}") + + github_client = Github(get_github_token(self.github_token_file_path)) + repo = github_client.get_repo('yugabyte/yugabyte-db-thirdparty') + + releases_by_commit: Dict[str, ReleaseGroup] = {} + num_skipped_old_tag_format = 0 + num_skipped_wrong_branch = 0 + num_skipped_too_os_specific = 0 + num_releases_found = 0 + + releases = [] + get_releases_start_time_sec = time.time() + try: + for release in repo.get_releases(): + releases.append(release) + except GithubException as exc: + if 'Only the first 1000 results are available.' in str(exc): + logging.info("Ignoring exception: %s", exc) + else: + raise exc + logging.info("Time spent to iterate all releases: %.1f sec", + time.time() - get_releases_start_time_sec) + + for release in releases: + sha: str = release.target_commitish + assert isinstance(sha, str) + + if not is_valid_git_sha(sha): + sha = repo.get_commit(sha).sha + + tag_name = release.tag_name + if len(tag_name.split('-')) <= 2: + logging.debug(f"Skipping release tag: {tag_name} (old format, too few components)") + num_skipped_old_tag_format += 1 + continue + if self.tag_filter_pattern and not self.tag_filter_pattern.match(tag_name): + logging.info(f'Skipping tag {tag_name}, does not match the filter') + continue + + try: + yb_dep_release = GitHubThirdPartyRelease(release, target_commitish=sha) + except SkipThirdPartyReleaseException as ex: + logging.warning("Skipping release: %s", ex) + continue + + if not yb_dep_release.is_consistent_with_yb_version(yb_version): + logging.info( + f"Skipping release tag: {tag_name} (does not match version {yb_version})") + num_skipped_wrong_branch += 1 + continue + + if yb_dep_release.should_skip_as_too_os_specific(): + logging.info( + f"Skipping release {yb_dep_release} because it is too specific to a particular " + "version of OS and we could use a build for an 
 older OS instead.") + num_skipped_too_os_specific += 1 + continue + + if sha not in releases_by_commit: + releases_by_commit[sha] = ReleaseGroup(sha) + + num_releases_found += 1 + logging.debug(f"Found release: {yb_dep_release}") + releases_by_commit[sha].add_release(yb_dep_release) + + if num_skipped_old_tag_format > 0: + logging.info(f"Skipped {num_skipped_old_tag_format} releases due to old tag format") + if num_skipped_wrong_branch > 0: + logging.info(f"Skipped {num_skipped_wrong_branch} releases due to branch mismatch") + if num_skipped_too_os_specific > 0: + logging.info(f"Skipped {num_skipped_too_os_specific} releases as too OS-specific") + logging.info( + f"Found {num_releases_found} releases for {len(releases_by_commit)} different commits") + + latest_group_by_max = max( + releases_by_commit.values(), key=ReleaseGroup.get_max_creation_timestamp) + latest_group_by_min = max( + releases_by_commit.values(), key=ReleaseGroup.get_min_creation_timestamp) + if latest_group_by_max is not latest_group_by_min: + raise ValueError( + "Overlapping releases for different commits. No good way to identify latest " + f"release: e.g. {latest_group_by_max.sha} and {latest_group_by_min.sha}.") + + latest_group: ReleaseGroup = latest_group_by_max + + latest_release_sha = latest_group.sha + logging.info( + f"Latest released yugabyte-db-thirdparty commit: {latest_release_sha}. " + f"Released at: {latest_group.get_max_creation_timestamp()}.") + + groups_to_use: List[ReleaseGroup] = [latest_group] + + if self.also_use_commits: + for extra_commit in self.also_use_commits: + logging.info(f"Additional manually specified commit to use: {extra_commit}") + if extra_commit == latest_release_sha: + logging.info( + f"(already matches the latest commit {latest_release_sha}, skipping.)") + continue + if extra_commit not in releases_by_commit: + raise ValueError( + f"No releases found for user-specified commit {extra_commit}. 
" + "Please check if there is an error.") + groups_to_use.append(releases_by_commit[extra_commit]) + + releases_to_use: List[GitHubThirdPartyRelease] = [ + rel for release_group in groups_to_use + for rel in release_group.releases + if rel.tag not in BROKEN_TAGS + ] + + default_sha = self.override_default_sha or latest_release_sha + archives: List[Dict[str, str]] = [] + new_metadata: ThirdPartyArchivesYAML = { + SHA_KEY: default_sha + } + + releases_by_key_without_tag: DefaultDict[Tuple[str, ...], List[GitHubThirdPartyRelease]] = \ + defaultdict(list) + + num_valid_releases = 0 + num_invalid_releases = 0 + for yb_thirdparty_release in releases_to_use: + if yb_thirdparty_release.validate_url(): + num_valid_releases += 1 + releases_by_key_without_tag[ + yb_thirdparty_release.get_sort_key(include_tag=False) + ].append(yb_thirdparty_release) + else: + num_invalid_releases += 1 + logging.info( + f"Valid releases found: {num_valid_releases}, invalid releases: {num_invalid_releases}") + + filtered_releases_to_use = [] + for key_without_tag, releases_for_key in releases_by_key_without_tag.items(): + if len(releases_for_key) > 1: + picked_release = max(releases_for_key, key=lambda r: r.tag) + logging.info( + "Multiple releases found for the same key (excluding the tag). " + "Using the latest one: %s\n" + "Key: %s.\nReleases:\n %s" % ( + picked_release, + key_without_tag, + '\n '.join([str(r) for r in releases_for_key]))) + filtered_releases_to_use.append(picked_release) + else: + filtered_releases_to_use.append(releases_for_key[0]) + + filtered_releases_to_use.sort(key=GitHubThirdPartyRelease.get_sort_key) + + for yb_thirdparty_release in filtered_releases_to_use: + release_as_dict = yb_thirdparty_release.as_dict() + if release_as_dict[SHA_KEY] == default_sha: + # To reduce the size of diffs when updating third-party archives YAML file. 
+ del release_as_dict[SHA_KEY] + archives.append(release_as_dict) + new_metadata[ARCHIVES_KEY] = archives + + self.write_metadata_file(new_metadata) + logging.info( + f"Wrote information for {len(filtered_releases_to_use)} pre-built " + f"yugabyte-db-thirdparty archives to {self.archive_metadata_path}.") + + def write_metadata_file( + self, + new_metadata: ThirdPartyArchivesYAML) -> None: + yaml = common_util.get_ruamel_yaml_instance() + string_stream = StringIO() + yaml.dump(new_metadata, string_stream) + yaml_lines = string_stream.getvalue().split('\n') + new_lines = [] + for line in yaml_lines: + if line.startswith(' -'): + new_lines.append('') + new_lines.append(line) + while new_lines and new_lines[-1].strip() == '': + new_lines.pop() + + with open(self.archive_metadata_path, 'w') as output_file: + output_file.write('\n'.join(new_lines) + '\n') + + +def load_metadata_file(file_path: str) -> ThirdPartyArchivesYAML: + data = load_yaml_file(file_path) + default_sha = data.get(SHA_KEY) + if default_sha is not None: + for archive in data[ARCHIVES_KEY]: + if archive.get(SHA_KEY, '').strip() == '': + archive[SHA_KEY] = default_sha + if ARCHIVES_KEY not in data: + data[ARCHIVES_KEY] = [] + unexpected_keys = data.keys() - [ARCHIVES_KEY, SHA_KEY] + if unexpected_keys: + raise ValueError( + f"Found unexpected keys in third-party archive metadata loaded from file {file_path}. " + f"Details: {pprint.pformat(data)}") + return data + + +def load_metadata() -> ThirdPartyArchivesYAML: + return load_metadata_file(get_archive_metadata_file_path()) + + +def load_manual_metadata() -> ThirdPartyArchivesYAML: + return load_metadata_file(get_manual_archive_metadata_file_path()) diff --git a/python/yugabyte/thirdparty_releases.py b/python/yugabyte/thirdparty_releases.py new file mode 100644 index 000000000000..495f302f012a --- /dev/null +++ b/python/yugabyte/thirdparty_releases.py @@ -0,0 +1,261 @@ +# Copyright (c) YugabyteDB, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except +# in compliance with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software distributed under the License +# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express +# or implied. See the License for the specific language governing permissions and limitations +# under the License. +# + +# Utilities for finding available third-party release archives from GitHub. + +import logging +import re + +from typing import Optional, Tuple, Dict, Any, List, Set +from datetime import datetime + +from autorepr import autorepr # type: ignore +from github.GitRelease import GitRelease +from sys_detection import SHORT_OS_NAME_REGEX_STR + +from yugabyte.string_util import none_to_empty_string +from yugabyte.os_versions import adjust_os_type + + +DOWNLOAD_URL_PREFIX = 'https://github.com/yugabyte/yugabyte-db-thirdparty/releases/download/' + +# These were incorrectly used without the "clang" prefix to indicate various versions of Clang. +NUMBER_ONLY_VERSIONS_OF_CLANG = [str(i) for i in [12, 13, 14]] + +COMPILER_TYPE_RE_STR = (r'(?:-(?P(?:((?:gcc|clang|devtoolset-?)[a-z0-9.]+)|%s)))?' % + '|'.join(NUMBER_ONLY_VERSIONS_OF_CLANG)) + +ALLOWED_LTO_TYPES = ['thin', 'full'] + +# Linux distribution with the oldest glibc available to us. Third-party archives built on this +# OS can be used on newer Linux distributions, unless we need ASAN/TSAN. +PREFERRED_OS_TYPE = 'amzn2' + + +def get_arch_regex(index: int) -> str: + """ + There are two places where the architecture could appear in the third-party archive release tag. + We make them available under "architecture1" and "architecture2" capture group names. + """ + arch_regex_str = '|'.join(['x86_64', 'aarch64', 'arm64']) + return r'(?:-(?P%s))?' 
% (index, arch_regex_str) + + +def get_archive_name_from_tag(tag: str) -> str: + return f'yugabyte-db-thirdparty-{tag}.tar.gz' + + +TAG_RE_STR = ''.join([ + r'^v(?:(?P[0-9.]+)-)?', + r'(?P[0-9]+)-', + r'(?P[0-9a-f]+)', + get_arch_regex(1), + r'(?:-(?P(?:%s)[a-z0-9.]*))' % SHORT_OS_NAME_REGEX_STR, + get_arch_regex(2), + r'(?:-(?Plinuxbrew))?', + # "devtoolset" really means just "gcc" here. We should replace it with "gcc" in release names. + # Also, "12", "13" and "14" were incorectly used instead of "clang13" in some release archive + # names. + COMPILER_TYPE_RE_STR, + r'(?:-(?Plinuxbrew))?', + r'(?:-(?:(?P%s)-lto))?' % '|'.join(ALLOWED_LTO_TYPES), + r'$', +]) +TAG_RE = re.compile(TAG_RE_STR) + + +class ThirdPartyReleaseBase: + # The list of fields without the release tag. The tag is special because it includes the + # timestamp, so by repeating a build on the same commit in yugabyte-db-thirdparty, we could get + # multiple releases that have the same OS/architecture/compiler type/SHA but different tags. 
+ # Therefore we distinguish between "key with tag" and "key with no tag" + KEY_FIELDS_NO_TAG = [ + 'os_type', 'architecture', 'compiler_type', 'is_linuxbrew', 'sha', 'lto_type' + ] + KEY_FIELDS_WITH_TAG = KEY_FIELDS_NO_TAG + ['tag'] + + os_type: str + architecture: str + compiler_type: str + is_linuxbrew: bool + sha: str + tag: str + lto_type: Optional[str] + + __str__ = __repr__ = autorepr(KEY_FIELDS_WITH_TAG) + + def as_dict(self) -> Dict[str, str]: + return { + k: getattr(self, k) for k in self.KEY_FIELDS_WITH_TAG + if getattr(self, k) != ThirdPartyReleaseBase.get_default_field_value(k) + } + + def get_sort_key(self, include_tag: bool = True) -> Tuple[str, ...]: + return tuple( + none_to_empty_string(getattr(self, k)) for k in + (self.KEY_FIELDS_WITH_TAG if include_tag else self.KEY_FIELDS_NO_TAG)) + + @staticmethod + def get_default_field_value(field_name: str) -> Any: + if field_name == 'lto_type': + return None + if field_name == 'is_linuxbrew': + return False + return '' + + +class SkipThirdPartyReleaseException(Exception): + def __init__(self, msg: str) -> None: + super().__init__(msg) + + +class GitHubThirdPartyRelease(ThirdPartyReleaseBase): + github_release: GitRelease + + timestamp: str + url: str + branch_name: Optional[str] + + def __init__(self, github_release: GitRelease, target_commitish: Optional[str] = None) -> None: + self.github_release = github_release + self.sha = target_commitish or self.github_release.target_commitish + + tag = self.github_release.tag_name + if tag.endswith('-snyk-scan'): + raise SkipThirdPartyReleaseException(f"Skipping a tag ending with '-snyk-scan': {tag}") + + tag_match = TAG_RE.match(tag) + if not tag_match: + logging.info(f"Full regular expression for release tags: {TAG_RE_STR}") + raise ValueError(f"Could not parse tag: {tag}, does not match regex: {TAG_RE_STR}") + + group_dict = tag_match.groupdict() + + sha_prefix = tag_match.group('sha_prefix') + if not self.sha.startswith(sha_prefix): + msg = (f"SHA prefix 
{sha_prefix} extracted from tag {tag} is not a prefix of the " + f"SHA corresponding to the release/tag: {self.sha}. Skipping.") + raise SkipThirdPartyReleaseException(msg) + + self.timestamp = group_dict['timestamp'] + self.os_type = adjust_os_type(group_dict['os']) + + arch1 = group_dict['architecture1'] + arch2 = group_dict['architecture2'] + if arch1 is not None and arch2 is not None and arch1 != arch2: + raise ValueError("Contradicting values of architecture in tag '%s'" % tag) + self.architecture = arch1 or arch2 + self.is_linuxbrew = (bool(group_dict.get('is_linuxbrew1')) or + bool(group_dict.get('is_linuxbrew2'))) + + compiler_type = group_dict.get('compiler_type') + if compiler_type is None and self.os_type == 'macos': + compiler_type = 'clang' + if compiler_type is None and self.is_linuxbrew: + compiler_type = 'gcc' + if compiler_type in NUMBER_ONLY_VERSIONS_OF_CLANG: + assert isinstance(compiler_type, str) + compiler_type = 'clang' + compiler_type + + if compiler_type is None: + raise ValueError( + f"Could not determine compiler type from tag {tag}.
Matches: {group_dict}.") + compiler_type = compiler_type.strip('-') + self.tag = tag + self.compiler_type = compiler_type + + branch_name = group_dict.get('branch_name') + if branch_name is not None: + branch_name = branch_name.rstrip('-') + self.branch_name = branch_name + + self.lto_type = group_dict.get('lto_type') + + def validate_url(self) -> bool: + asset_urls = [asset.browser_download_url for asset in self.github_release.get_assets()] + + if len(asset_urls) != 2: + logging.warning( + "Expected to find exactly two asset URLs for a release " + "(one for the .tar.gz, the other for the checksum), " + f"but found {len(asset_urls)}: {asset_urls}") + return False + + non_checksum_urls = [url for url in asset_urls if not url.endswith('.sha256')] + assert len(non_checksum_urls) == 1 + self.url = non_checksum_urls[0] + if not self.url.startswith(DOWNLOAD_URL_PREFIX): + logging.warning( + f"Expected archive download URL to start with {DOWNLOAD_URL_PREFIX}, found " + f"{self.url}") + return False + + url_suffix = self.url[len(DOWNLOAD_URL_PREFIX):] + url_suffix_components = url_suffix.split('/') + assert len(url_suffix_components) == 2 + + archive_basename = url_suffix_components[1] + expected_basename = get_archive_name_from_tag(self.tag) + if archive_basename != expected_basename: + logging.warning( + f"Expected archive name based on tag: {expected_basename}, " + f"actual name: {archive_basename}, url: {self.url}") + return False + + return True + + def is_consistent_with_yb_version(self, yb_version: str) -> bool: + return (self.branch_name is None or + yb_version.startswith((self.branch_name + '.', self.branch_name + '-'))) + + def should_skip_as_too_os_specific(self) -> bool: + """ + Certain build types of specific OSes could be skipped because we can use our "preferred OS + type", the supported Linux distribution with the oldest glibc version, instead. We can do + that in cases we know we don't need to run ASAN/TSAN. 
We know that we don't use ASAN/TSAN + on aarch64 or for LTO builds as of 11/07/2022. Also we don't skip Linuxbrew builds or GCC + builds. + """ + return ( + self.os_type != PREFERRED_OS_TYPE and + self.compiler_type.startswith('clang') and + # We handle Linuxbrew builds in a special way, e.g. they could be built on AlmaLinux 8. + not self.is_linuxbrew and + # We don't run ASAN/TSAN on aarch64 or with LTO yet. + (self.architecture == 'aarch64' or self.lto_type is not None) + ) + + +class ReleaseGroup: + sha: str + releases: List[GitHubThirdPartyRelease] + creation_timestamps: List[datetime] + + def __init__(self, sha: str) -> None: + self.sha = sha + self.releases = [] + self.creation_timestamps = [] + + def add_release(self, release: GitHubThirdPartyRelease) -> None: + if release.sha != self.sha: + raise ValueError( + f"Adding a release with wrong SHA. Expected: {self.sha}, got: " + f"{release.sha}.") + self.releases.append(release) + self.creation_timestamps.append(release.github_release.created_at) + + def get_max_creation_timestamp(self) -> datetime: + return max(self.creation_timestamps) + + def get_min_creation_timestamp(self) -> datetime: + return min(self.creation_timestamps) diff --git a/python/yugabyte/thirdparty_tool.py b/python/yugabyte/thirdparty_tool.py index 27f641cfceaa..8befed3f3910 100755 --- a/python/yugabyte/thirdparty_tool.py +++ b/python/yugabyte/thirdparty_tool.py @@ -16,843 +16,47 @@ This is a command-line tool that allows to get the download URL for a prebuilt third-party dependencies archive for a particular configuration, as well as to update these URLs based on the recent releases in the https://github.com/yugabyte/yugabyte-db-thirdparty repository. + +Another separate area of functionality of this tool is manipulating "inline 3rd party" dependencies +in the src/inline-thirdparty directory. 
""" import sys -import re import os import logging -import argparse -import ruamel.yaml -import time -import pprint - -from io import StringIO -from autorepr import autorepr # type: ignore - -from github import Github, GithubException -from github.GitRelease import GitRelease from typing import DefaultDict, Dict, List, Any, Optional, Pattern, Tuple, Union, Set, cast -from datetime import datetime from yugabyte.common_util import ( init_logging, - YB_SRC_ROOT, - load_yaml_file, - to_yaml_str, - arg_str_to_bool, make_parent_dir, ) -from yugabyte.file_util import read_file, write_file - -from yugabyte import common_util, arg_util - -from sys_detection import local_sys_conf, SHORT_OS_NAME_REGEX_STR, is_macos - -from collections import defaultdict - -from yugabyte.os_versions import adjust_os_type, is_compatible_os - -ruamel_yaml_object = ruamel.yaml.YAML() - -THIRDPARTY_ARCHIVES_REL_PATH = os.path.join('build-support', 'thirdparty_archives.yml') -MANUAL_THIRDPARTY_ARCHIVES_REL_PATH = os.path.join( - 'build-support', 'thirdparty_archives_manual.yml') - -NUM_TOP_COMMITS = 10 - -DOWNLOAD_URL_PREFIX = 'https://github.com/yugabyte/yugabyte-db-thirdparty/releases/download/' - -ARCH_REGEX_STR = '|'.join(['x86_64', 'aarch64', 'arm64']) - -# These were incorrectly used without the "clang" prefix to indicate various versions of Clang. -NUMBER_ONLY_VERSIONS_OF_CLANG = [str(i) for i in [12, 13, 14]] - -# Linux distribution with the oldest glibc available to us. Third-party archives built on this -# OS can be used on newer Linux distributions, unless we need ASAN/TSAN. -PREFERRED_OS_TYPE = 'amzn2' - - -ThirdPartyArchivesYAML = Dict[str, Union[str, List[Dict[str, str]]]] - - -def get_arch_regex(index: int) -> str: - """ - There are two places where the architecture could appear in the third-party archive release tag. - We make them available under "architecture1" and "architecture2" capture group names. - """ - return r'(?:-(?P%s))?' 
% (index, ARCH_REGEX_STR) - - -COMPILER_TYPE_RE_STR = (r'(?:-(?P(?:((?:gcc|clang|devtoolset-?)[a-z0-9.]+)|%s)))?' % - '|'.join(NUMBER_ONLY_VERSIONS_OF_CLANG)) - -ALLOWED_LTO_TYPES = ['thin', 'full'] - -TAG_RE_STR = ''.join([ - r'^v(?:(?P[0-9.]+)-)?', - r'(?P[0-9]+)-', - r'(?P[0-9a-f]+)', - get_arch_regex(1), - r'(?:-(?P(?:%s)[a-z0-9.]*))' % SHORT_OS_NAME_REGEX_STR, - get_arch_regex(2), - r'(?:-(?Plinuxbrew))?', - # "devtoolset" really means just "gcc" here. We should replace it with "gcc" in release names. - # Also, "12", "13" and "14" were incorectly used instead of "clang13" in some release archive - # names. - COMPILER_TYPE_RE_STR, - r'(?:-(?Plinuxbrew))?', - r'(?:-(?:(?P%s)-lto))?' % '|'.join(ALLOWED_LTO_TYPES), - r'$', -]) -TAG_RE = re.compile(TAG_RE_STR) - -# We will store the SHA1 to be used for the local third-party checkout, as well as to use by default -# for individual archives unless it is overridden, under this key. -SHA_KEY = 'sha' - -# Skip these problematic tags. -BROKEN_TAGS = set(['v20210907234210-47a70bc7dc-centos7-x86_64-linuxbrew-gcc5']) - -SHA_HASH = re.compile(r'^[0-9a-f]{40}$') - - -def get_archive_name_from_tag(tag: str) -> str: - return f'yugabyte-db-thirdparty-{tag}.tar.gz' - - -def none_to_empty_string(x: Optional[Any]) -> Any: - if x is None: - return '' - return x - - -class ThirdPartyReleaseBase: - # The list of fields without the release tag. The tag is special because it includes the - # timestamp, so by repeating a build on the same commit in yugabyte-db-thirdparty, we could get - # multiple releases that have the same OS/architecture/compiler type/SHA but different tags. 
- # Therefore we distinguish between "key with tag" and "key with no tag" - KEY_FIELDS_NO_TAG = [ - 'os_type', 'architecture', 'compiler_type', 'is_linuxbrew', 'sha', 'lto_type' - ] - KEY_FIELDS_WITH_TAG = KEY_FIELDS_NO_TAG + ['tag'] - - os_type: str - architecture: str - compiler_type: str - is_linuxbrew: bool - sha: str - tag: str - lto_type: Optional[str] - - __str__ = __repr__ = autorepr(KEY_FIELDS_WITH_TAG) - - def as_dict(self) -> Dict[str, str]: - return { - k: getattr(self, k) for k in self.KEY_FIELDS_WITH_TAG - if getattr(self, k) != ThirdPartyReleaseBase.get_default_field_value(k) - } - - def get_sort_key(self, include_tag: bool = True) -> Tuple[str, ...]: - return tuple( - none_to_empty_string(getattr(self, k)) for k in - (self.KEY_FIELDS_WITH_TAG if include_tag else self.KEY_FIELDS_NO_TAG)) - - @staticmethod - def get_default_field_value(field_name: str) -> Any: - if field_name == 'lto_type': - return None - if field_name == 'is_linuxbrew': - return False - return '' - - -class SkipThirdPartyReleaseException(Exception): - def __init__(self, msg: str) -> None: - super().__init__(msg) - - -class GitHubThirdPartyRelease(ThirdPartyReleaseBase): - github_release: GitRelease - - timestamp: str - url: str - branch_name: Optional[str] - - def __init__(self, github_release: GitRelease, target_commitish: Optional[str] = None) -> None: - self.github_release = github_release - self.sha = target_commitish or self.github_release.target_commitish - - tag = self.github_release.tag_name - if tag.endswith('-snyk-scan'): - raise SkipThirdPartyReleaseException(f"Skipping a tag ending with '-snyk-scan': {tag}") - - tag_match = TAG_RE.match(tag) - if not tag_match: - logging.info(f"Full regular expression for release tags: {TAG_RE_STR}") - raise ValueError(f"Could not parse tag: {tag}, does not match regex: {TAG_RE_STR}") - - group_dict = tag_match.groupdict() - - sha_prefix = tag_match.group('sha_prefix') - if not self.sha.startswith(sha_prefix): - msg = (f"SHA prefix 
{sha_prefix} extracted from tag {tag} is not a prefix of the " - f"SHA corresponding to the release/tag: {self.sha}. Skipping.") - raise SkipThirdPartyReleaseException(msg) - - self.timestamp = group_dict['timestamp'] - self.os_type = adjust_os_type(group_dict['os']) - - arch1 = group_dict['architecture1'] - arch2 = group_dict['architecture2'] - if arch1 is not None and arch2 is not None and arch1 != arch2: - raise ValueError("Contradicting values of arhitecture in tag '%s'" % tag) - self.architecture = arch1 or arch2 - self.is_linuxbrew = (bool(group_dict.get('is_linuxbrew1')) or - bool(group_dict.get('is_linuxbrew2'))) - - compiler_type = group_dict.get('compiler_type') - if compiler_type is None and self.os_type == 'macos': - compiler_type = 'clang' - if compiler_type is None and self.is_linuxbrew: - compiler_type = 'gcc' - if compiler_type in NUMBER_ONLY_VERSIONS_OF_CLANG: - assert isinstance(compiler_type, str) - compiler_type == 'clang' + compiler_type - - if compiler_type is None: - raise ValueError( - f"Could not determine compiler type from tag {tag}. 
Matches: {group_dict}.") - compiler_type = compiler_type.strip('-') - self.tag = tag - self.compiler_type = compiler_type - - branch_name = group_dict.get('branch_name') - if branch_name is not None: - branch_name = branch_name.rstrip('-') - self.branch_name = branch_name - - self.lto_type = group_dict.get('lto_type') - - def validate_url(self) -> bool: - asset_urls = [asset.browser_download_url for asset in self.github_release.get_assets()] - - if len(asset_urls) != 2: - logging.warning( - "Expected to find exactly two asset URLs for a release " - "(one for the .tar.gz, the other for the checksum), " - f"but found {len(asset_urls)}: {asset_urls}") - return False - - non_checksum_urls = [url for url in asset_urls if not url.endswith('.sha256')] - assert len(non_checksum_urls) == 1 - self.url = non_checksum_urls[0] - if not self.url.startswith(DOWNLOAD_URL_PREFIX): - logging.warning( - f"Expected archive download URL to start with {DOWNLOAD_URL_PREFIX}, found " - f"{self.url}") - return False - - url_suffix = self.url[len(DOWNLOAD_URL_PREFIX):] - url_suffix_components = url_suffix.split('/') - assert len(url_suffix_components) == 2 - - archive_basename = url_suffix_components[1] - expected_basename = get_archive_name_from_tag(self.tag) - if archive_basename != expected_basename: - logging.warning( - f"Expected archive name based on tag: {expected_basename}, " - f"actual name: {archive_basename}, url: {self.url}") - return False - - return True - - def is_consistent_with_yb_version(self, yb_version: str) -> bool: - return (self.branch_name is None or - yb_version.startswith((self.branch_name + '.', self.branch_name + '-'))) - - def should_skip_as_too_os_specific(self) -> bool: - """ - Certain build types of specific OSes could be skipped because we can use our "preferred OS - type", the supported Linux distribution with the oldest glibc version, instead. We can do - that in cases we know we don't need to run ASAN/TSAN. 
We know that we don't use ASAN/TSAN - on aarch64 or for LTO builds as of 11/07/2022. Also we don't skip Linuxbrew builds or GCC - builds. - """ - return ( - self.os_type != PREFERRED_OS_TYPE and - self.compiler_type.startswith('clang') and - # We handle Linuxbrew builds in a special way, e.g. they could be built on AlmaLinux 8. - not self.is_linuxbrew and - # We don't run ASAN/TSAN on aarch64 or with LTO yet. - (self.architecture == 'aarch64' or self.lto_type is not None) - ) - - -@ruamel_yaml_object.register_class -class MetadataItem(ThirdPartyReleaseBase): - """ - A metadata item for a third-party download archive loaded from the thirdparty_archives.yml - file. - """ - - def __init__(self, yaml_data: Dict[str, Any]) -> None: - processed_field_names: Set[str] = set() - for field_name in GitHubThirdPartyRelease.KEY_FIELDS_WITH_TAG: - field_value = yaml_data.get(field_name) - if field_value is None: - field_value = ThirdPartyReleaseBase.get_default_field_value(field_name) - setattr(self, field_name, field_value) - processed_field_names.add(field_name) - unknown_fields = set(yaml_data.keys() - processed_field_names) - if unknown_fields: - raise ValueError( - "Unknown fields found in third-party metadata YAML file: %s. " - "Entire item: %s" % (sorted(unknown_fields), pprint.pformat(yaml_data))) - - def url(self) -> str: - return f'{DOWNLOAD_URL_PREFIX}{self.tag}/{get_archive_name_from_tag(self.tag)}' - - -class ReleaseGroup: - sha: str - releases: List[GitHubThirdPartyRelease] - creation_timestamps: List[datetime] - - def __init__(self, sha: str) -> None: - self.sha = sha - self.releases = [] - self.creation_timestamps = [] - - def add_release(self, release: GitHubThirdPartyRelease) -> None: - if release.sha != self.sha: - raise ValueError( - f"Adding a release with wrong SHA. 
Expected: {self.sha}, got: " - f"{release.sha}.") - self.releases.append(release) - self.creation_timestamps.append(release.github_release.created_at) - - def get_max_creation_timestamp(self) -> datetime: - return max(self.creation_timestamps) - - def get_min_creation_timestamp(self) -> datetime: - return min(self.creation_timestamps) - - -def validate_git_commit(commit: str) -> str: - commit = commit.strip().lower() - if not re.match(r'^[0-9a-f]{40}$', commit): - raise ValueError(f"Invalid Git commit SHA1: {commit}") - return commit - - -def parse_args() -> argparse.Namespace: - parser = argparse.ArgumentParser(description=__doc__) - parser.add_argument( - '--github-token-file', - help='Read GitHub token from this file. Authenticated requests have a higher rate limit. ' - 'If this is not specified, we will still use the GITHUB_TOKEN environment ' - 'variable. The YB_GITHUB_TOKEN_FILE_PATH environment variable, if set, will be used ' - 'as the default value of this argument.', - default=os.getenv('YB_GITHUB_TOKEN_FILE_PATH')) - parser.add_argument( - '--update', '-u', action='store_true', - help=f'Update the third-party archive metadata in in {THIRDPARTY_ARCHIVES_REL_PATH}.') - parser.add_argument( - '--list-compilers', - action='store_true', - help='List compiler types available for the given OS and architecture') - parser.add_argument( - '--get-sha1', - action='store_true', - help='Show the Git SHA1 of the commit to use in the yugabyte-db-thirdparty repo ' - 'in case we are building the third-party dependencies from scratch.') - parser.add_argument( - '--save-thirdparty-url-to-file', - help='Determine the third-party archive download URL for the combination of criteria, ' - 'including the compiler type, and write it to the file specified by this argument.') - parser.add_argument( - '--compiler-type', - help='Compiler type, to help us decide which third-party archive to choose. 
' - 'The default value is determined by the YB_COMPILER_TYPE environment variable.', - default=os.getenv('YB_COMPILER_TYPE')) - parser.add_argument( - '--os-type', - help='Operating system type, to help us decide which third-party archive to choose. ' - 'The default value is determined automatically based on the current OS.') - parser.add_argument( - '--architecture', - help='Machine architecture, to help us decide which third-party archive to choose. ' - 'The default value is determined automatically based on the current platform.') - parser.add_argument( - '--is-linuxbrew', - help='Whether the archive shget_download_urlould be based on Linuxbrew.', - type=arg_str_to_bool, - default=None) - parser.add_argument( - '--verbose', - help='Verbose debug information') - parser.add_argument( - '--tag-filter-regex', - help='Only look at tags satisfying this regular expression.') - parser.add_argument( - '--lto', - choices=ALLOWED_LTO_TYPES, - help='Specify link-time optimization type.') - parser.add_argument( - '--also-use-commit', - nargs='+', - type=arg_util.sha1_regex_arg_type, - help='One or more Git commits in the yugabyte-db-thirdparty repository that we should ' - 'find releases for, in addition to the most recent commit in that repository that is ' - 'associated with any of the releases. For use with --update.') - parser.add_argument( - '--allow-older-os', - help='Allow using third-party archives built for an older compatible OS, such as CentOS 7.' - 'This is typically OK, as long as no runtime libraries for e.g. 
ASAN or UBSAN ' - 'need to be used, which have to be built for the exact same version of OS.', - action='store_true') - parser.add_argument( - '--override-default-sha', - type=arg_util.sha1_regex_arg_type, - help='Use the given SHA at the top of the generated third-party archives file.') - - if len(sys.argv) == 1: - parser.print_help(sys.stderr) - sys.exit(1) - - return parser.parse_args() - - -def get_archive_metadata_file_path() -> str: - return os.path.join(YB_SRC_ROOT, THIRDPARTY_ARCHIVES_REL_PATH) - - -def get_manual_archive_metadata_file_path() -> str: - return os.path.join(YB_SRC_ROOT, MANUAL_THIRDPARTY_ARCHIVES_REL_PATH) - - -def get_github_token(token_file_path: Optional[str]) -> Optional[str]: - github_token: Optional[str] - if token_file_path: - logging.info("Reading GitHub token from %s", token_file_path) - github_token = read_file(token_file_path).strip() - else: - github_token = os.getenv('GITHUB_TOKEN') - if github_token is None: - return github_token - - if len(github_token) != 40: - raise ValueError(f"Invalid GitHub token length: {len(github_token)}, expected 40.") - return github_token - - -class MetadataUpdater: - github_token_file_path: str - tag_filter_pattern: Optional[Pattern] - also_use_commits: List[str] - archive_metadata_path: str - override_default_sha: Optional[str] - - def __init__( - self, - github_token_file_path: str, - tag_filter_regex_str: Optional[str], - also_use_commits: List[str], - override_default_sha: Optional[str]) -> None: - self.github_token_file_path = github_token_file_path - if tag_filter_regex_str: - self.tag_filter_pattern = re.compile(tag_filter_regex_str) - else: - self.tag_filter_pattern = None - self.also_use_commits = also_use_commits - self.archive_metadata_path = get_archive_metadata_file_path() - self.override_default_sha = override_default_sha - - def update_archive_metadata_file(self) -> None: - yb_version = read_file(os.path.join(YB_SRC_ROOT, 'version.txt')).strip() - - logging.info(f"Updating third-party 
archive metadata file in {self.archive_metadata_path}") - - github_client = Github(get_github_token(self.github_token_file_path)) - repo = github_client.get_repo('yugabyte/yugabyte-db-thirdparty') - - releases_by_commit: Dict[str, ReleaseGroup] = {} - num_skipped_old_tag_format = 0 - num_skipped_wrong_branch = 0 - num_skipped_too_os_specific = 0 - num_releases_found = 0 - - releases = [] - get_releases_start_time_sec = time.time() - try: - for release in repo.get_releases(): - releases.append(release) - except GithubException as exc: - if 'Only the first 1000 results are available.' in str(exc): - logging.info("Ignoring exception: %s", exc) - else: - raise exc - logging.info("Time spent to iterate all releases: %.1f sec", - time.time() - get_releases_start_time_sec) - - for release in releases: - sha: str = release.target_commitish - assert isinstance(sha, str) - - if SHA_HASH.match(sha) is None: - sha = repo.get_commit(sha).sha - - tag_name = release.tag_name - if len(tag_name.split('-')) <= 2: - logging.debug(f"Skipping release tag: {tag_name} (old format, too few components)") - num_skipped_old_tag_format += 1 - continue - if self.tag_filter_pattern and not self.tag_filter_pattern.match(tag_name): - logging.info(f'Skipping tag {tag_name}, does not match the filter') - continue - - try: - yb_dep_release = GitHubThirdPartyRelease(release, target_commitish=sha) - except SkipThirdPartyReleaseException as ex: - logging.warning("Skipping release: %s", ex) - continue - - if not yb_dep_release.is_consistent_with_yb_version(yb_version): - logging.info( - f"Skipping release tag: {tag_name} (does not match version {yb_version})") - num_skipped_wrong_branch += 1 - continue - - if yb_dep_release.should_skip_as_too_os_specific(): - logging.info( - f"Skipping release {yb_dep_release} because it is too specific to a particular " - "version of OS and we could use a build for an older OS instead.") - num_skipped_too_os_specific += 1 - continue - - if sha not in 
releases_by_commit: - releases_by_commit[sha] = ReleaseGroup(sha) - - num_releases_found += 1 - logging.debug(f"Found release: {yb_dep_release}") - releases_by_commit[sha].add_release(yb_dep_release) - - if num_skipped_old_tag_format > 0: - logging.info(f"Skipped {num_skipped_old_tag_format} releases due to old tag format") - if num_skipped_wrong_branch > 0: - logging.info(f"Skipped {num_skipped_wrong_branch} releases due to branch mismatch") - if num_skipped_too_os_specific > 0: - logging.info(f"Skipped {num_skipped_too_os_specific} releases as too OS-specific") - logging.info( - f"Found {num_releases_found} releases for {len(releases_by_commit)} different commits") - - latest_group_by_max = max( - releases_by_commit.values(), key=ReleaseGroup.get_max_creation_timestamp) - latest_group_by_min = max( - releases_by_commit.values(), key=ReleaseGroup.get_min_creation_timestamp) - if latest_group_by_max is not latest_group_by_min: - raise ValueError( - "Overlapping releases for different commits. No good way to identify latest " - "release: e.g. {latest_group_by_max.sha} and {latest_group_by_min.sha}.") - - latest_group: ReleaseGroup = latest_group_by_max - - latest_release_sha = latest_group.sha - logging.info( - f"Latest released yugabyte-db-thirdparty commit: {latest_release_sha}. " - f"Released at: {latest_group.get_max_creation_timestamp()}.") - - groups_to_use: List[ReleaseGroup] = [latest_group] - - if self.also_use_commits: - for extra_commit in self.also_use_commits: - logging.info(f"Additional manually specified commit to use: {extra_commit}") - if extra_commit == latest_release_sha: - logging.info( - f"(already matches the latest commit {latest_release_sha}, skipping.)") - continue - if extra_commit not in releases_by_commit: - raise ValueError( - f"No releases found for user-specified commit {extra_commit}. 
" - "Please check if there is an error.") - groups_to_use.append(releases_by_commit[extra_commit]) - - releases_to_use: List[GitHubThirdPartyRelease] = [ - rel for release_group in groups_to_use - for rel in release_group.releases - if rel.tag not in BROKEN_TAGS - ] - - default_sha = self.override_default_sha or latest_release_sha - archives: List[Dict[str, str]] = [] - new_metadata: ThirdPartyArchivesYAML = { - SHA_KEY: default_sha - } - - releases_by_key_without_tag: DefaultDict[Tuple[str, ...], List[GitHubThirdPartyRelease]] = \ - defaultdict(list) - - num_valid_releases = 0 - num_invalid_releases = 0 - for yb_thirdparty_release in releases_to_use: - if yb_thirdparty_release.validate_url(): - num_valid_releases += 1 - releases_by_key_without_tag[ - yb_thirdparty_release.get_sort_key(include_tag=False) - ].append(yb_thirdparty_release) - else: - num_invalid_releases += 1 - logging.info( - f"Valid releases found: {num_valid_releases}, invalid releases: {num_invalid_releases}") - - filtered_releases_to_use = [] - for key_without_tag, releases_for_key in releases_by_key_without_tag.items(): - if len(releases_for_key) > 1: - picked_release = max(releases_for_key, key=lambda r: r.tag) - logging.info( - "Multiple releases found for the same key (excluding the tag). " - "Using the latest one: %s\n" - "Key: %s.\nReleases:\n %s" % ( - picked_release, - key_without_tag, - '\n '.join([str(r) for r in releases_for_key]))) - filtered_releases_to_use.append(picked_release) - else: - filtered_releases_to_use.append(releases_for_key[0]) - - filtered_releases_to_use.sort(key=GitHubThirdPartyRelease.get_sort_key) - - for yb_thirdparty_release in filtered_releases_to_use: - release_as_dict = yb_thirdparty_release.as_dict() - if release_as_dict['sha'] == default_sha: - # To reduce the size of diffs when updating third-party archives YAML file. 
- del release_as_dict['sha'] - archives.append(release_as_dict) - new_metadata['archives'] = archives - - self.write_metadata_file(new_metadata) - logging.info( - f"Wrote information for {len(filtered_releases_to_use)} pre-built " - f"yugabyte-db-thirdparty archives to {self.archive_metadata_path}.") - - def write_metadata_file( - self, - new_metadata: ThirdPartyArchivesYAML) -> None: - yaml = common_util.get_ruamel_yaml_instance() - string_stream = StringIO() - yaml.dump(new_metadata, string_stream) - yaml_lines = string_stream.getvalue().split('\n') - new_lines = [] - for line in yaml_lines: - if line.startswith(' -'): - new_lines.append('') - new_lines.append(line) - while new_lines and new_lines[-1].strip() == '': - new_lines.pop() - - with open(self.archive_metadata_path, 'w') as output_file: - output_file.write('\n'.join(new_lines) + '\n') - - -def load_metadata_file(file_path: str) -> ThirdPartyArchivesYAML: - data = load_yaml_file(file_path) - default_sha = data.get('sha') - if default_sha is not None: - for archive in data['archives']: - if archive.get('sha', '').strip() == '': - archive['sha'] = default_sha - if 'archives' not in data: - data['archives'] = [] - unexpected_keys = data.keys() - ['archives', 'sha'] - if unexpected_keys: - raise ValueError( - f"Found unexpected keys in third-party archive metadata loaded from file {file_path}. 
" - f"Details: {pprint.pformat(data)}") - return data - - -def load_metadata() -> ThirdPartyArchivesYAML: - return load_metadata_file(get_archive_metadata_file_path()) - - -def load_manual_metadata() -> ThirdPartyArchivesYAML: - return load_metadata_file(get_manual_archive_metadata_file_path()) - - -def filter_for_os(archive_candidates: List[MetadataItem], os_type: str) -> List[MetadataItem]: - filtered_exactly = [ - candidate for candidate in archive_candidates if candidate.os_type == os_type - ] - if filtered_exactly: - return filtered_exactly - return [ - candidate for candidate in archive_candidates - # is_compatible_os does not take into account that some code built on CentOS 7 might run - # on AlmaLinux 8, etc. It only takes into account the equivalence of various flavors of RHEL - # compatible OSes. - if is_compatible_os(candidate.os_type, os_type) - ] - - -def get_compilers( - metadata_items: List[MetadataItem], - os_type: Optional[str], - architecture: Optional[str], - is_linuxbrew: Optional[bool], - lto: Optional[str], - allow_older_os: bool) -> list: - if not os_type: - os_type = local_sys_conf().short_os_name_and_version() - if not architecture: - architecture = local_sys_conf().architecture - preferred_os_type: Optional[str] = None - if (allow_older_os and - not is_linuxbrew and - os_type != PREFERRED_OS_TYPE and - not os_type.startswith('mac')): - preferred_os_type = PREFERRED_OS_TYPE - - candidates: List[MetadataItem] = [ - metadata_item - for metadata_item in metadata_items - if metadata_item.architecture == architecture and - matches_maybe_empty(metadata_item.lto_type, lto) - ] - - os_candidates = filter_for_os(candidates, os_type) - if preferred_os_type: - candidates_for_preferred_os_type = filter_for_os(candidates, preferred_os_type) - os_candidates.extend(candidates_for_preferred_os_type) - if is_linuxbrew is not None: - os_candidates = [ - candidate for candidate in os_candidates - if candidate.is_linuxbrew == is_linuxbrew - ] - - compilers = 
sorted(set([metadata_item.compiler_type for metadata_item in os_candidates])) - - return compilers - - -def matches_maybe_empty(a: Optional[str], b: Optional[str]) -> bool: - return (a or '') == (b or '') - - -def compiler_type_matches(a: str, b: str) -> bool: - ''' - >>> compiler_type_matches('clang10', 'clang10') - True - >>> compiler_type_matches('clang14', 'gcc11') - False - >>> compiler_type_matches('12', 'clang12') - True - >>> compiler_type_matches('clang12', '12') - True - >>> compiler_type_matches('clang12', '14') - False - ''' - if a == b: - return True - if a > b: - return compiler_type_matches(b, a) - return a in NUMBER_ONLY_VERSIONS_OF_CLANG and b == 'clang' + a - - -def get_third_party_release( - available_archives: List[MetadataItem], - compiler_type: str, - os_type: Optional[str], - architecture: Optional[str], - is_linuxbrew: Optional[bool], - lto: Optional[str], - allow_older_os: bool) -> MetadataItem: - if not os_type: - os_type = local_sys_conf().short_os_name_and_version() - preferred_os_type: Optional[str] = None - if (allow_older_os and - not is_linuxbrew and - os_type != PREFERRED_OS_TYPE and - not os_type.startswith('mac')): - preferred_os_type = PREFERRED_OS_TYPE - - if not architecture: - architecture = local_sys_conf().architecture - - needed_compiler_type = compiler_type - - candidates: List[Any] = [ - archive for archive in available_archives - if compiler_type_matches(archive.compiler_type, needed_compiler_type) and - archive.architecture == architecture and - matches_maybe_empty(archive.lto_type, lto) - ] - - if is_linuxbrew is not None: - candidates = [ - candidate for candidate in candidates - if candidate.is_linuxbrew == is_linuxbrew - ] - - if is_linuxbrew is None or not is_linuxbrew or len(candidates) > 1: - # If a Linuxbrew archive is requested, we don't have to filter by OS, because archives - # should be OS-independent. But still do that if we have more than one candidate. 
- # - # Also, if we determine that we would rather use a "preferred OS type" (an old version of - # Linux that allows us to produce "universal packages"), we try to use it first. - - filtered_for_os = False - if preferred_os_type: - candidates_for_preferred_os_type = filter_for_os(candidates, preferred_os_type) - if candidates_for_preferred_os_type: - candidates = candidates_for_preferred_os_type - filtered_for_os = True - - if not filtered_for_os: - candidates = filter_for_os(candidates, os_type) - - if len(candidates) == 1: - return candidates[0] - - if candidates: - i = 1 - for candidate in candidates: - logging.warning("Third-party release archive candidate #%d: %s", i, candidate) - i += 1 - wrong_count_str = 'more than one' - else: - if (is_macos() and - os_type == 'macos' and - compiler_type.startswith('clang') and - compiler_type != 'clang'): - return get_third_party_release( - available_archives=available_archives, - compiler_type='clang', - os_type=os_type, - architecture=architecture, - is_linuxbrew=False, - lto=lto, - allow_older_os=False) - logging.info(f"Available release archives:\n{to_yaml_str(available_archives)}") - wrong_count_str = 'no' +from yugabyte.file_util import write_file +from yugabyte.thirdparty_tool_impl import ( + get_compilers, + get_third_party_release, + parse_args, + update_thirdparty_dependencies, +) +from yugabyte.thirdparty_archives_metadata import ( + load_manual_metadata, + load_metadata, + MetadataItem, + SHA_KEY, +) - raise ValueError( - f"Found {wrong_count_str} third-party release archives to download for OS type " - f"{os_type}, compiler type matching {compiler_type}, architecture {architecture}, " - f"is_linuxbrew={is_linuxbrew}. 
See more details above.") +from yugabyte import inline_thirdparty def main() -> None: args = parse_args() init_logging(verbose=args.verbose) if args.update: - updater = MetadataUpdater( - github_token_file_path=args.github_token_file, - tag_filter_regex_str=args.tag_filter_regex, - also_use_commits=args.also_use_commit, - override_default_sha=args.override_default_sha) - updater.update_archive_metadata_file() + update_thirdparty_dependencies(args) + return + + if args.sync_inline_thirdparty: + inline_thirdparty.sync_inline_thirdparty() return metadata = load_metadata() diff --git a/python/yugabyte/thirdparty_tool_impl.py b/python/yugabyte/thirdparty_tool_impl.py new file mode 100644 index 000000000000..0e27110a8496 --- /dev/null +++ b/python/yugabyte/thirdparty_tool_impl.py @@ -0,0 +1,300 @@ +# Copyright (c) YugabyteDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except +# in compliance with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software distributed under the License +# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express +# or implied. See the License for the specific language governing permissions and limitations +# under the License. 
+ +from typing import List, Optional, Any + +import argparse +import logging +import os +import sys + +from sys_detection import local_sys_conf, is_macos + +from yugabyte import arg_util + +from yugabyte.common_util import ( + to_yaml_str, + arg_str_to_bool, +) + +from yugabyte.thirdparty_archives_metadata import ( + THIRDPARTY_ARCHIVES_REL_PATH, + MetadataItem, +) + +from yugabyte.thirdparty_releases import ( + ALLOWED_LTO_TYPES, + NUMBER_ONLY_VERSIONS_OF_CLANG, +) + +from yugabyte.os_versions import is_compatible_os +from yugabyte.string_util import matches_maybe_empty + +from yugabyte.thirdparty_releases import PREFERRED_OS_TYPE + +from yugabyte.thirdparty_archives_metadata import MetadataUpdater + +from yugabyte import inline_thirdparty + + +def parse_args() -> argparse.Namespace: + # TODO: refactor this to use submodules. + + parser = argparse.ArgumentParser(description=__doc__) + parser.add_argument( + '--github-token-file', + help='Read GitHub token from this file. Authenticated requests have a higher rate limit. ' + 'If this is not specified, we will still use the GITHUB_TOKEN environment ' + 'variable. 
The YB_GITHUB_TOKEN_FILE_PATH environment variable, if set, will be used ' + 'as the default value of this argument.', + default=os.getenv('YB_GITHUB_TOKEN_FILE_PATH')) + parser.add_argument( + '--update', '-u', action='store_true', + help=f'Update the third-party archive metadata in in {THIRDPARTY_ARCHIVES_REL_PATH}.') + parser.add_argument( + '--list-compilers', + action='store_true', + help='List compiler types available for the given OS and architecture') + parser.add_argument( + '--get-sha1', + action='store_true', + help='Show the Git SHA1 of the commit to use in the yugabyte-db-thirdparty repo ' + 'in case we are building the third-party dependencies from scratch.') + parser.add_argument( + '--save-thirdparty-url-to-file', + help='Determine the third-party archive download URL for the combination of criteria, ' + 'including the compiler type, and write it to the file specified by this argument.') + parser.add_argument( + '--compiler-type', + help='Compiler type, to help us decide which third-party archive to choose. ' + 'The default value is determined by the YB_COMPILER_TYPE environment variable.', + default=os.getenv('YB_COMPILER_TYPE')) + parser.add_argument( + '--os-type', + help='Operating system type, to help us decide which third-party archive to choose. ' + 'The default value is determined automatically based on the current OS.') + parser.add_argument( + '--architecture', + help='Machine architecture, to help us decide which third-party archive to choose. 
' + 'The default value is determined automatically based on the current platform.') + parser.add_argument( + '--is-linuxbrew', + help='Whether the archive should be based on Linuxbrew.', + type=arg_str_to_bool, + default=None) + parser.add_argument( + '--verbose', + help='Verbose debug information') + parser.add_argument( + '--tag-filter-regex', + help='Only look at tags satisfying this regular expression.') + parser.add_argument( + '--lto', + choices=ALLOWED_LTO_TYPES, + help='Specify link-time optimization type.') + parser.add_argument( + '--also-use-commit', + nargs='+', + type=arg_util.sha1_regex_arg_type, + help='One or more Git commits in the yugabyte-db-thirdparty repository that we should ' + 'find releases for, in addition to the most recent commit in that repository that is ' + 'associated with any of the releases. For use with --update.') + parser.add_argument( + '--allow-older-os', + help='Allow using third-party archives built for an older compatible OS, such as CentOS 7.' + 'This is typically OK, as long as no runtime libraries for e.g. ASAN or UBSAN ' + 'need to be used, which have to be built for the exact same version of OS.', + action='store_true') + parser.add_argument( + '--override-default-sha', + type=arg_util.sha1_regex_arg_type, + help='Use the given SHA at the top of the generated third-party archives file.') + parser.add_argument( + '--sync-inline-thirdparty', + action='store_true', + help='Sync the inline third-party dependencies directory (%s) with the metadata in %s.' 
% ( + inline_thirdparty.INLINE_THIRDPARTY_SRC_DIR, + inline_thirdparty.INLINE_THIRDPARTY_CONFIG_PATH + )) + + if len(sys.argv) == 1: + parser.print_help(sys.stderr) + sys.exit(1) + + return parser.parse_args() + + +def filter_for_os(archive_candidates: List[MetadataItem], os_type: str) -> List[MetadataItem]: + filtered_exactly = [ + candidate for candidate in archive_candidates if candidate.os_type == os_type + ] + if filtered_exactly: + return filtered_exactly + return [ + candidate for candidate in archive_candidates + # is_compatible_os does not take into account that some code built on CentOS 7 might run + # on AlmaLinux 8, etc. It only takes into account the equivalence of various flavors of RHEL + # compatible OSes. + if is_compatible_os(candidate.os_type, os_type) + ] + + +def get_compilers( + metadata_items: List[MetadataItem], + os_type: Optional[str], + architecture: Optional[str], + is_linuxbrew: Optional[bool], + lto: Optional[str], + allow_older_os: bool) -> list: + if not os_type: + os_type = local_sys_conf().short_os_name_and_version() + if not architecture: + architecture = local_sys_conf().architecture + preferred_os_type: Optional[str] = None + if (allow_older_os and + not is_linuxbrew and + os_type != PREFERRED_OS_TYPE and + not os_type.startswith('mac')): + preferred_os_type = PREFERRED_OS_TYPE + + candidates: List[MetadataItem] = [ + metadata_item + for metadata_item in metadata_items + if metadata_item.architecture == architecture and + matches_maybe_empty(metadata_item.lto_type, lto) + ] + + os_candidates = filter_for_os(candidates, os_type) + if preferred_os_type: + candidates_for_preferred_os_type = filter_for_os(candidates, preferred_os_type) + os_candidates.extend(candidates_for_preferred_os_type) + if is_linuxbrew is not None: + os_candidates = [ + candidate for candidate in os_candidates + if candidate.is_linuxbrew == is_linuxbrew + ] + + compilers = sorted(set([metadata_item.compiler_type for metadata_item in os_candidates])) + + 
return compilers + + +def compiler_type_matches(a: str, b: str) -> bool: + ''' + >>> compiler_type_matches('clang10', 'clang10') + True + >>> compiler_type_matches('clang14', 'gcc11') + False + >>> compiler_type_matches('12', 'clang12') + True + >>> compiler_type_matches('clang12', '12') + True + >>> compiler_type_matches('clang12', '14') + False + ''' + if a == b: + return True + if a > b: + return compiler_type_matches(b, a) + return a in NUMBER_ONLY_VERSIONS_OF_CLANG and b == 'clang' + a + + +def get_third_party_release( + available_archives: List[MetadataItem], + compiler_type: str, + os_type: Optional[str], + architecture: Optional[str], + is_linuxbrew: Optional[bool], + lto: Optional[str], + allow_older_os: bool) -> MetadataItem: + if not os_type: + os_type = local_sys_conf().short_os_name_and_version() + preferred_os_type: Optional[str] = None + if (allow_older_os and + not is_linuxbrew and + os_type != PREFERRED_OS_TYPE and + not os_type.startswith('mac')): + preferred_os_type = PREFERRED_OS_TYPE + + if not architecture: + architecture = local_sys_conf().architecture + + needed_compiler_type = compiler_type + + candidates: List[Any] = [ + archive for archive in available_archives + if compiler_type_matches(archive.compiler_type, needed_compiler_type) and + archive.architecture == architecture and + matches_maybe_empty(archive.lto_type, lto) + ] + + if is_linuxbrew is not None: + candidates = [ + candidate for candidate in candidates + if candidate.is_linuxbrew == is_linuxbrew + ] + + if is_linuxbrew is None or not is_linuxbrew or len(candidates) > 1: + # If a Linuxbrew archive is requested, we don't have to filter by OS, because archives + # should be OS-independent. But still do that if we have more than one candidate. + # + # Also, if we determine that we would rather use a "preferred OS type" (an old version of + # Linux that allows us to produce "universal packages"), we try to use it first. 
+ + filtered_for_os = False + if preferred_os_type: + candidates_for_preferred_os_type = filter_for_os(candidates, preferred_os_type) + if candidates_for_preferred_os_type: + candidates = candidates_for_preferred_os_type + filtered_for_os = True + + if not filtered_for_os: + candidates = filter_for_os(candidates, os_type) + + if len(candidates) == 1: + return candidates[0] + + if candidates: + i = 1 + for candidate in candidates: + logging.warning("Third-party release archive candidate #%d: %s", i, candidate) + i += 1 + wrong_count_str = 'more than one' + else: + if (is_macos() and + os_type == 'macos' and + compiler_type.startswith('clang') and + compiler_type != 'clang'): + return get_third_party_release( + available_archives=available_archives, + compiler_type='clang', + os_type=os_type, + architecture=architecture, + is_linuxbrew=False, + lto=lto, + allow_older_os=False) + logging.info(f"Available release archives:\n{to_yaml_str(available_archives)}") + wrong_count_str = 'no' + + raise ValueError( + f"Found {wrong_count_str} third-party release archives to download for OS type " + f"{os_type}, compiler type matching {compiler_type}, architecture {architecture}, " + f"is_linuxbrew={is_linuxbrew}. See more details above.") + + +def update_thirdparty_dependencies(args: argparse.Namespace) -> None: + updater = MetadataUpdater( + github_token_file_path=args.github_token_file, + tag_filter_regex_str=args.tag_filter_regex, + also_use_commits=args.also_use_commit, + override_default_sha=args.override_default_sha) + updater.update_archive_metadata_file() diff --git a/src/inline-thirdparty/README.md b/src/inline-thirdparty/README.md index 67d33eae2eba..d34b48ac2cdd 100644 --- a/src/inline-thirdparty/README.md +++ b/src/inline-thirdparty/README.md @@ -5,25 +5,23 @@ them to the yugabyte-db-thirdparty repo. We also only copy the relevant subdirec repositories. 
Each library is copied in its own appropriately named directory, and each library's directory is added separately to the list of include directories in CMakeLists.txt. -We will probably create a tool to manage these dependencies automatically in the future. +The list of inline third-party dependencies is specified in build-support/inline_thirdparty.yml. +Each dependency has the following fields: +- name: The name of the dependency. +- git_url: upstream git URL, typically github, either a YugabyteDB fork of the upstream repo, or + the official repository. +- tag or commit: The git tag or commit to use. +- src_dir: the directory within the upstream repository to copy. +- dest_dir: the target directory to copy the upstream repository to, relative to + src/inline-thirdparty. This has to start with the dependency name as the first path component + for clarity, but could be a deeper directory. -* usearch - * Repo: https://github.com/yugabyte/usearch - * Description: Similarity search for vector and text - * Subdirectory: include - * Tag: v2.11.0-yb-1 - * License: Apache 2.0 +To update inline third-party dependencies, modify the tag or commit in inline_thirdparty.yml, +commit the changes, and run the following command: -* fp16 - * Repo: https://github.com/Maratyszcza/FP16/ - * Description: Header-only library for conversion to/from half-precision floating point formats - * Subdirectory: include - * Commit: 0a92994d729ff76a58f692d3028ca1b64b145d91 - * License: MIT +build-support/thirdparty_tool --sync-inline-thirdparty -* hnswlib - * Repo: https://github.com/nmslib/hnswlib - * Description: Header-only C++/python library for fast approximate nearest neighbors - * Subdirectory: hnswlib - * Commit: 2142dc6f4dd08e64ab727a7bbd93be7f732e80b0 - * License: Apache 2.0 +This requires the absence of local changes in the git repository. This will create one or more +commits for each dependency. 
These commits should be submitted as a Phabricator diff for review, +tested in Jenkins, and then landed. The changes to inline-thirdparty should be pushed as +separate commits, not squashed together. From 1977cf75ddeb8e8a4b0229c833b3cf0a65786683 Mon Sep 17 00:00:00 2001 From: Anubhav Srivastava Date: Wed, 11 Sep 2024 13:19:24 -0700 Subject: [PATCH 14/75] [#23766] docdb: Get colocated schema_version correctly in ChangeMetadataOperation Summary: The code was previously always getting the parent table schema version in the apply of ChangeMetadataOperation. This caused CREATE INDEX on a newly created table to fail because the alter on the primary table (to add the index) was ignored because `ChangeMetadataOperation::Apply()` thinks the schema version is already 1 (even though it is 0; 1 is the schema version of the parent table). This diff changes `ChangeMetadataOperation::Apply()` to get the colocated table's schema version correctly, and refactors the functions to get the schema version so the choice of whether to get the primary table schema version or a colocated table's schema version is more explicit. The only functional change is in the `ChangeMetadataOperation` and the modified test. 
Jira: DB-12667 Test Plan: `./yb_build.sh release --cxx-test integration-tests_minicluster-snapshot-test --gtest_filter *CreateTableAfterClone/1` Reviewers: mhaddad Reviewed By: mhaddad Subscribers: ybase Differential Revision: https://phorge.dev.yugabyte.com/D37994 --- src/yb/client/client-test.cc | 8 +++--- src/yb/integration-tests/alter_table-test.cc | 18 ++++++------- .../minicluster-snapshot-test.cc | 15 +++++++---- src/yb/tablet/local_tablet_writer.cc | 2 +- .../operations/change_metadata_operation.cc | 25 +++++++++++++++++-- src/yb/tablet/tablet-test-util.cc | 2 +- src/yb/tablet/tablet.cc | 8 +++--- src/yb/tablet/tablet_bootstrap.cc | 2 +- src/yb/tablet/tablet_metadata.cc | 8 ++++-- src/yb/tablet/tablet_metadata.h | 5 +++- src/yb/tablet/tablet_peer-test.cc | 2 +- src/yb/tablet/write_query.cc | 14 +++++------ src/yb/tools/fs_tool.cc | 3 ++- src/yb/tserver/read_query.cc | 3 +-- src/yb/tserver/tablet_service.cc | 4 ++- src/yb/tserver/ts_tablet_manager.cc | 3 ++- 16 files changed, 79 insertions(+), 43 deletions(-) diff --git a/src/yb/client/client-test.cc b/src/yb/client/client-test.cc index 67e22e7e2716..3edd0a180f03 100644 --- a/src/yb/client/client-test.cc +++ b/src/yb/client/client-test.cc @@ -1585,7 +1585,7 @@ TEST_F(ClientTest, TestBasicAlterOperations) { ->AddColumn("new_col")->Type(DataType::INT32); ASSERT_OK(table_alterer->Alter()); // TODO(nspiegelberg): The below assert is flakey because of KUDU-1539. - ASSERT_EQ(1, tablet_peer->tablet()->metadata()->schema_version()); + ASSERT_EQ(1, tablet_peer->tablet()->metadata()->primary_table_schema_version()); } { @@ -1595,7 +1595,7 @@ TEST_F(ClientTest, TestBasicAlterOperations) { ->RenameTo(kRenamedTableName) ->Alter()); // TODO(nspiegelberg): The below assert is flakey because of KUDU-1539. 
- ASSERT_EQ(2, tablet_peer->tablet()->metadata()->schema_version()); + ASSERT_EQ(2, tablet_peer->tablet()->metadata()->primary_table_schema_version()); ASSERT_EQ(kRenamedTableName.table_name(), tablet_peer->tablet()->metadata()->table_name()); const auto tables = ASSERT_RESULT(client_->ListTables()); @@ -1918,11 +1918,11 @@ TEST_F(ClientTest, TestReplicatedTabletWritesAndAltersWithLeaderElection) { { auto tablet_peer = ASSERT_RESULT( new_leader->server()->tablet_manager()->GetTablet(remote_tablet->tablet_id())); - auto old_version = tablet_peer->tablet()->metadata()->schema_version(); + auto old_version = tablet_peer->tablet()->metadata()->primary_table_schema_version(); std::unique_ptr table_alterer(client_->NewTableAlterer(kReplicatedTable)); table_alterer->AddColumn("new_col")->Type(DataType::INT32); ASSERT_OK(table_alterer->Alter()); - ASSERT_EQ(old_version + 1, tablet_peer->tablet()->metadata()->schema_version()); + ASSERT_EQ(old_version + 1, tablet_peer->tablet()->metadata()->primary_table_schema_version()); } } diff --git a/src/yb/integration-tests/alter_table-test.cc b/src/yb/integration-tests/alter_table-test.cc index c25d3141cfca..3735a329476d 100644 --- a/src/yb/integration-tests/alter_table-test.cc +++ b/src/yb/integration-tests/alter_table-test.cc @@ -307,14 +307,14 @@ INSTANTIATE_TEST_CASE_P(BatchSize, ReplicatedAlterTableTest, ::testing::Values(1 // on the TS handling the tablet of the altered table. // TODO: create and verify multiple tablets when the client will support that. 
TEST_P(AlterTableTest, TestTabletReports) { - ASSERT_EQ(0, tablet_peer_->tablet()->metadata()->schema_version()); + ASSERT_EQ(0, tablet_peer_->tablet()->metadata()->primary_table_schema_version()); ASSERT_OK(AddNewI32Column(kTableName, "new-i32")); - ASSERT_EQ(1, tablet_peer_->tablet()->metadata()->schema_version()); + ASSERT_EQ(1, tablet_peer_->tablet()->metadata()->primary_table_schema_version()); } // Verify that adding an existing column will return an "already present" error TEST_P(AlterTableTest, TestAddExistingColumn) { - ASSERT_EQ(0, tablet_peer_->tablet()->metadata()->schema_version()); + ASSERT_EQ(0, tablet_peer_->tablet()->metadata()->primary_table_schema_version()); { Status s = AddNewI32Column(kTableName, "c1"); @@ -322,7 +322,7 @@ TEST_P(AlterTableTest, TestAddExistingColumn) { ASSERT_STR_CONTAINS(s.ToString(), "The column already exists: c1"); } - ASSERT_EQ(0, tablet_peer_->tablet()->metadata()->schema_version()); + ASSERT_EQ(0, tablet_peer_->tablet()->metadata()->primary_table_schema_version()); } // Adding a nullable column with no default value should be equivalent @@ -348,7 +348,7 @@ TEST_P(AlterTableTest, TestAddNullableColumnWithoutDefault) { // Verify that, if a tablet server is down when an alter command is issued, // it will eventually receive the command when it restarts. 
TEST_P(AlterTableTest, TestAlterOnTSRestart) { - ASSERT_EQ(0, tablet_peer_->tablet()->metadata()->schema_version()); + ASSERT_EQ(0, tablet_peer_->tablet()->metadata()->primary_table_schema_version()); ShutdownTS(); @@ -374,13 +374,13 @@ TEST_P(AlterTableTest, TestAlterOnTSRestart) { // Restart the TS and wait for the new schema RestartTabletServer(); ASSERT_OK(WaitAlterTableCompletion(kTableName, 50)); - ASSERT_EQ(1, tablet_peer_->tablet()->metadata()->schema_version()); + ASSERT_EQ(1, tablet_peer_->tablet()->metadata()->primary_table_schema_version()); } // Verify that nothing is left behind on cluster shutdown with pending async tasks TEST_P(AlterTableTest, TestShutdownWithPendingTasks) { DontVerifyClusterBeforeNextTearDown(); - ASSERT_EQ(0, tablet_peer_->tablet()->metadata()->schema_version()); + ASSERT_EQ(0, tablet_peer_->tablet()->metadata()->primary_table_schema_version()); ShutdownTS(); @@ -402,7 +402,7 @@ TEST_P(AlterTableTest, TestRestartTSDuringAlter) { return; } - ASSERT_EQ(0, tablet_peer_->tablet()->metadata()->schema_version()); + ASSERT_EQ(0, tablet_peer_->tablet()->metadata()->primary_table_schema_version()); Status s = AddNewI32Column(kTableName, "new-i32", MonoDelta::FromMilliseconds(1)); ASSERT_TRUE(s.IsTimedOut()); @@ -415,7 +415,7 @@ TEST_P(AlterTableTest, TestRestartTSDuringAlter) { // Wait for the new schema ASSERT_OK(WaitAlterTableCompletion(kTableName, 50)); - ASSERT_EQ(1, tablet_peer_->tablet()->metadata()->schema_version()); + ASSERT_EQ(1, tablet_peer_->tablet()->metadata()->primary_table_schema_version()); } TEST_P(AlterTableTest, TestGetSchemaAfterAlterTable) { diff --git a/src/yb/integration-tests/minicluster-snapshot-test.cc b/src/yb/integration-tests/minicluster-snapshot-test.cc index 9151fe89c16d..e0aa72d10923 100644 --- a/src/yb/integration-tests/minicluster-snapshot-test.cc +++ b/src/yb/integration-tests/minicluster-snapshot-test.cc @@ -1000,20 +1000,25 @@ TEST_P(PgCloneTestWithColocatedDBParam, 
YB_DISABLE_TEST_IN_SANITIZERS(CreateTabl ASSERT_OK(source_conn_->ExecuteFormat("INSERT INTO t1 VALUES (1, 1)")); auto clone_time = ASSERT_RESULT(GetCurrentTime()).ToInt64(); - ASSERT_OK(source_conn_->Execute("CREATE TABLE t2 (k int, v1 int)")); + ASSERT_OK(source_conn_->Execute("CREATE TABLE t2 (k int, value int)")); + ASSERT_OK(source_conn_->Execute("CREATE INDEX i2 on t2(value)")); - // Clone before t2 was created and test that we can recreate t2. + // Clone before t2 and i2 were created. ASSERT_OK(source_conn_->ExecuteFormat( "CREATE DATABASE $0 TEMPLATE $1 AS OF $2", kTargetNamespaceName1, kSourceNamespaceName, clone_time)); auto target_conn = ASSERT_RESULT(ConnectToDB(kTargetNamespaceName1)); - ASSERT_OK(target_conn.Execute("CREATE TABLE t2 (k int, v1 int)")); - // Should be able to create new tables and indexes and insert into all tables. + // Test that we can recreate dropped tables and create brand new tables, with indexes. + ASSERT_OK(target_conn.Execute("CREATE TABLE t2 (k int, value int)")); + ASSERT_OK(target_conn.Execute("CREATE TABLE t3 (k int, value int)")); ASSERT_OK(target_conn.Execute("CREATE INDEX i1 on t1(value)")); + ASSERT_OK(target_conn.Execute("CREATE INDEX i2 on t2(value)")); + ASSERT_OK(target_conn.Execute("CREATE INDEX i3 on t3(value)")); + + // Test that we can insert into all tables. 
ASSERT_OK(target_conn.Execute("INSERT INTO t1 VALUES (2, 2)")); ASSERT_OK(target_conn.Execute("INSERT INTO t2 VALUES (1, 1)")); - ASSERT_OK(target_conn.Execute("CREATE TABLE t3 (k int, v1 int)")); ASSERT_OK(target_conn.Execute("INSERT INTO t3 VALUES (1, 1)")); } diff --git a/src/yb/tablet/local_tablet_writer.cc b/src/yb/tablet/local_tablet_writer.cc index af0410887fd7..ebbaf6ebba02 100644 --- a/src/yb/tablet/local_tablet_writer.cc +++ b/src/yb/tablet/local_tablet_writer.cc @@ -63,7 +63,7 @@ Status LocalTabletWriter::Write(QLWriteRequestPB* request) { Status LocalTabletWriter::WriteBatch(Batch* batch) { req_->Clear(); for (auto& req : *batch) { - req.set_schema_version(tablet_->metadata()->schema_version()); + req.set_schema_version(tablet_->metadata()->primary_table_schema_version()); QLSetHashCode(&req); } req_->mutable_ql_write_batch()->Swap(batch); diff --git a/src/yb/tablet/operations/change_metadata_operation.cc b/src/yb/tablet/operations/change_metadata_operation.cc index a1d35e0503aa..1f7a4fb0d58d 100644 --- a/src/yb/tablet/operations/change_metadata_operation.cc +++ b/src/yb/tablet/operations/change_metadata_operation.cc @@ -160,9 +160,30 @@ Status ChangeMetadataOperation::Apply(int64_t leader_term, Status* complete_stat MetadataChange metadata_change = MetadataChange::NONE; bool request_has_newer_schema = false; + SchemaVersion current_schema_version = std::numeric_limits::max(); if (request()->has_schema()) { + const auto& schema = request()->schema(); metadata_change = MetadataChange::SCHEMA; - request_has_newer_schema = tablet->metadata()->schema_version() < schema_version(); + if (schema.has_colocated_table_id()) { + switch (schema.colocated_table_id().value_case()) { + case ColocatedTableIdentifierPB::kCotableId: { + auto uuid = VERIFY_RESULT(Uuid::FromSlice(schema.colocated_table_id().cotable_id())); + current_schema_version = VERIFY_RESULT(tablet->metadata()->schema_version(uuid)); + break; + } + case ColocatedTableIdentifierPB::kColocationId: + 
current_schema_version = VERIFY_RESULT(tablet->metadata()->schema_version( + schema.colocated_table_id().colocation_id())); + break; + case ColocatedTableIdentifierPB::VALUE_NOT_SET: + // Not set means we should use the parent table schema version. + current_schema_version = tablet->metadata()->primary_table_schema_version(); + break; + } + } else { + current_schema_version = tablet->metadata()->primary_table_schema_version(); + } + request_has_newer_schema = current_schema_version < schema_version(); if (request_has_newer_schema) { ++num_operations; } @@ -207,7 +228,7 @@ Status ChangeMetadataOperation::Apply(int64_t leader_term, Status* complete_stat case MetadataChange::SCHEMA: if (!request_has_newer_schema) { LOG_WITH_PREFIX(INFO) - << "Already running schema version " << tablet->metadata()->schema_version() + << "Already running schema version " << current_schema_version << " got alter request for version " << schema_version(); break; } diff --git a/src/yb/tablet/tablet-test-util.cc b/src/yb/tablet/tablet-test-util.cc index 5c2992bad176..1e2c96de84ed 100644 --- a/src/yb/tablet/tablet-test-util.cc +++ b/src/yb/tablet/tablet-test-util.cc @@ -72,7 +72,7 @@ void YBTabletTest::SetUpTestTablet(const std::string& root_dir) { void YBTabletTest::AlterSchema(const Schema& schema) { ThreadSafeArena arena; LWChangeMetadataRequestPB req(&arena); - req.set_schema_version(tablet()->metadata()->schema_version() + 1); + req.set_schema_version(tablet()->metadata()->primary_table_schema_version() + 1); ChangeMetadataOperation operation(nullptr, nullptr, &req); ASSERT_OK(tablet()->CreatePreparedChangeMetadata( diff --git a/src/yb/tablet/tablet.cc b/src/yb/tablet/tablet.cc index d4d4c5802e48..cb49192e65e7 100644 --- a/src/yb/tablet/tablet.cc +++ b/src/yb/tablet/tablet.cc @@ -635,7 +635,7 @@ Tablet::Tablet(const TabletInitData& data) get_min_xcluster_schema_version_(std::move(data.get_min_xcluster_schema_version)) { CHECK(schema()->has_column_ids()); LOG_WITH_PREFIX(INFO) << "Schema 
version for " << metadata_->table_name() << " is " - << metadata_->schema_version(); + << metadata_->primary_table_schema_version(); if (data.metric_registry) { MetricEntity::AttributeMap attrs; @@ -1724,7 +1724,7 @@ Status Tablet::HandleQLReadRequest( docdb::QLRocksDBStorage storage{doc_db(metrics_scope.metrics())}; bool schema_version_compatible = IsSchemaVersionCompatible( - metadata()->schema_version(), ql_read_request.schema_version(), + metadata()->primary_table_schema_version(), ql_read_request.schema_version(), ql_read_request.is_compatible_with_previous_version()); Status status; @@ -1737,7 +1737,7 @@ Status Tablet::HandleQLReadRequest( *txn_op_ctx, storage, scoped_read_operation, result, rows_data); schema_version_compatible = IsSchemaVersionCompatible( - metadata()->schema_version(), ql_read_request.schema_version(), + metadata()->primary_table_schema_version(), ql_read_request.schema_version(), ql_read_request.is_compatible_with_previous_version()); } @@ -1748,7 +1748,7 @@ Status Tablet::HandleQLReadRequest( result->response.set_error_message(Format( "schema version mismatch for table $0: expected $1, got $2 (compt with prev: $3)", metadata()->table_id(), - metadata()->schema_version(), + metadata()->primary_table_schema_version(), ql_read_request.schema_version(), ql_read_request.is_compatible_with_previous_version())); return Status::OK(); diff --git a/src/yb/tablet/tablet_bootstrap.cc b/src/yb/tablet/tablet_bootstrap.cc index 6664f0af80ad..7cb76d3094b7 100644 --- a/src/yb/tablet/tablet_bootstrap.cc +++ b/src/yb/tablet/tablet_bootstrap.cc @@ -853,7 +853,7 @@ class TabletBootstrap { metadata.wal_dir(), metadata.fs_manager()->uuid(), *tablet_->schema(), - metadata.schema_version(), + metadata.primary_table_schema_version(), tablet_->GetTableMetricsEntity(), tablet_->GetTabletMetricsEntity(), append_pool_, diff --git a/src/yb/tablet/tablet_metadata.cc b/src/yb/tablet/tablet_metadata.cc index e47cd84f7cb0..b72c62d2734f 100644 --- 
a/src/yb/tablet/tablet_metadata.cc +++ b/src/yb/tablet/tablet_metadata.cc @@ -2064,6 +2064,10 @@ void RaftGroupMetadata::GetTableIdToSchemaVersionMap( } } +SchemaVersion RaftGroupMetadata::primary_table_schema_version() const { + return schema_version(""); +} + SchemaVersion RaftGroupMetadata::schema_version(const TableId& table_id) const { DCHECK_NE(state_, kNotLoadedYet); const TableInfoPtr table_info = CHECK_RESULT(GetTableInfo(table_id)); @@ -2082,8 +2086,8 @@ Result RaftGroupMetadata::schema_version(ColocationId colocation_ Result RaftGroupMetadata::schema_version(const Uuid& cotable_id) const { DCHECK_NE(state_, kNotLoadedYet); if (cotable_id.IsNil()) { - // Return the parent table schema version - return schema_version(); + // Return the parent table schema version. + return schema_version(""); } auto res = GetTableInfo(cotable_id.ToHexString()); diff --git a/src/yb/tablet/tablet_metadata.h b/src/yb/tablet/tablet_metadata.h index 6df8b72c7cbf..c103d1f95eb2 100644 --- a/src/yb/tablet/tablet_metadata.h +++ b/src/yb/tablet/tablet_metadata.h @@ -351,8 +351,11 @@ class RaftGroupMetadata : public RefCountedThreadSafe, std::shared_ptr index_map(const TableId& table_id = "") const; + SchemaVersion primary_table_schema_version() const; + + // Non-colocated tables should use primary_table_schema_version(). 
[[deprecated]] - SchemaVersion schema_version(const TableId& table_id = "") const; + SchemaVersion schema_version(const TableId& table_id) const; Result schema_version(ColocationId colocation_id) const; diff --git a/src/yb/tablet/tablet_peer-test.cc b/src/yb/tablet/tablet_peer-test.cc index e1afe4b01f2b..41e8a3a8b1de 100644 --- a/src/yb/tablet/tablet_peer-test.cc +++ b/src/yb/tablet/tablet_peer-test.cc @@ -193,7 +193,7 @@ class TabletPeerTest : public YBTabletTest { }; ASSERT_OK(Log::Open(LogOptions(), tablet()->tablet_id(), metadata->wal_dir(), metadata->fs_manager()->uuid(), *tablet()->schema(), - metadata->schema_version(), table_metric_entity_.get(), + metadata->primary_table_schema_version(), table_metric_entity_.get(), tablet_metric_entity_.get(), log_thread_pool_.get(), log_thread_pool_.get(), log_thread_pool_.get(), &log, pre_log_rollover_callback, new_segment_allocation_callback)); diff --git a/src/yb/tablet/write_query.cc b/src/yb/tablet/write_query.cc index 1fb070844312..80f851cfc987 100644 --- a/src/yb/tablet/write_query.cc +++ b/src/yb/tablet/write_query.cc @@ -320,7 +320,7 @@ void WriteQuery::Finished(WriteOperation* operation, const Status& status) { if (!status.IsAborted()) { auto schema_version = 0; if (sv.table_id().empty()) { - schema_version = metadata.schema_version(); + schema_version = metadata.primary_table_schema_version(); } else { auto uuid = Uuid::FromSlice(sv.table_id()); CHECK(uuid.ok()); @@ -451,7 +451,7 @@ Result WriteQuery::CqlRePrepareExecuteIfNecessary() { auto tablet = VERIFY_RESULT(tablet_safe()); auto& metadata = *tablet->metadata(); VLOG_WITH_FUNC(2) << "Schema version for " << metadata.table_name() << ": " - << metadata.schema_version(); + << metadata.primary_table_schema_version(); // Check if the schema version set in client_request_->ql_write_batch() is compatible with // the current schema pointed to by the tablet's metadata. 
if (!VERIFY_RESULT(CqlCheckSchemaVersion())) { @@ -463,7 +463,7 @@ Result WriteQuery::CqlRePrepareExecuteIfNecessary() { IllegalState, "Unexpected value encountered for write_batch().table_schema_version_size()"); auto* write_batch = request().mutable_write_batch(); const auto& schema_version = write_batch->table_schema_version().front().schema_version(); - if (schema_version == metadata.schema_version()) { + if (schema_version == metadata.primary_table_schema_version()) { return true; } // It could still happen that the schema version set in request() is one behind the current @@ -474,11 +474,11 @@ Result WriteQuery::CqlRePrepareExecuteIfNecessary() { // of requests in ql_write_batch with that of the current metadata and doesn't check the // schema compatibility for operations in 'request().write_batch()'. SCHECK_EQ( - schema_version + 1, metadata.schema_version(), + schema_version + 1, metadata.primary_table_schema_version(), IllegalState, "Expected current schema version to be ahead by at most 1"); write_batch->mutable_table_schema_version()->Clear(); docdb::AddTableSchemaVersion( - Uuid::Nil(), metadata.schema_version(), request().mutable_write_batch()); + Uuid::Nil(), metadata.primary_table_schema_version(), request().mutable_write_batch()); RETURN_NOT_OK( CqlPopulateDocOps(tablet, client_request_, &doc_ops_, response_, true /* reset_ops */)); return true; @@ -490,14 +490,14 @@ Result WriteQuery::CqlPrepareExecute() { auto& metadata = *tablet->metadata(); VLOG_WITH_FUNC(2) << "Schema version for " << metadata.table_name() << ": " - << metadata.schema_version(); + << metadata.primary_table_schema_version(); if (!VERIFY_RESULT(CqlCheckSchemaVersion())) { return false; } docdb::AddTableSchemaVersion( - Uuid::Nil(), metadata.schema_version(), request().mutable_write_batch()); + Uuid::Nil(), metadata.primary_table_schema_version(), request().mutable_write_batch()); RETURN_NOT_OK(CqlPopulateDocOps(tablet, client_request_, &doc_ops_, response_)); return true; } diff 
--git a/src/yb/tools/fs_tool.cc b/src/yb/tools/fs_tool.cc index 00283a2b2166..034068c6effa 100644 --- a/src/yb/tools/fs_tool.cc +++ b/src/yb/tools/fs_tool.cc @@ -248,7 +248,8 @@ Status FsTool::PrintTabletMeta(const string& tablet_id, int indent) { << std::endl; std::cout << Indent(indent) << "Table name: " << meta->table_name() << " Table id: " << meta->table_id() << std::endl; - std::cout << Indent(indent) << "Schema (version=" << meta->schema_version() << "): " + std::cout << Indent(indent) + << Format("Schema (primary table version=$0)", meta->primary_table_schema_version()) << schema->ToString() << std::endl; tablet::RaftGroupReplicaSuperBlockPB pb; diff --git a/src/yb/tserver/read_query.cc b/src/yb/tserver/read_query.cc index d7defc417162..82348e9549ce 100644 --- a/src/yb/tserver/read_query.cc +++ b/src/yb/tserver/read_query.cc @@ -707,9 +707,8 @@ Result ReadQuery::DoReadImpl() { } if (!req_->pgsql_batch().empty()) { - ReadRequestPB* mutable_req = const_cast(req_); size_t total_num_rows_read = 0; - for (PgsqlReadRequestPB& pgsql_read_req : *mutable_req->mutable_pgsql_batch()) { + for (const auto& pgsql_read_req : req_->pgsql_batch()) { tablet::PgsqlReadRequestResult result(&context_.sidecars().Start()); TRACE("Start HandlePgsqlReadRequest"); RETURN_NOT_OK(abstract_tablet_->HandlePgsqlReadRequest( diff --git a/src/yb/tserver/tablet_service.cc b/src/yb/tserver/tablet_service.cc index 91db686e74ab..cabd5cad1a0a 100644 --- a/src/yb/tserver/tablet_service.cc +++ b/src/yb/tserver/tablet_service.cc @@ -739,7 +739,9 @@ void TabletServiceAdminImpl::BackfillIndex( return; } - const uint32_t our_schema_version = tablet.peer->tablet_metadata()->schema_version(); + // TODO(asrivastava): This does not correctly handle colocated tables. 
+ const uint32_t our_schema_version = + tablet.peer->tablet_metadata()->primary_table_schema_version(); const uint32_t their_schema_version = req->schema_version(); bool all_at_backfill = true; bool all_past_backfill = true; diff --git a/src/yb/tserver/ts_tablet_manager.cc b/src/yb/tserver/ts_tablet_manager.cc index 0e93f0c2fb67..82854da0d8d1 100644 --- a/src/yb/tserver/ts_tablet_manager.cc +++ b/src/yb/tserver/ts_tablet_manager.cc @@ -2627,7 +2627,8 @@ void TSTabletManager::CreateReportedTabletPB(const TabletPeerPtr& tablet_peer, AppStatusPB* error_status = reported_tablet->mutable_error(); StatusToPB(tablet_peer->error(), error_status); } - reported_tablet->set_schema_version(tablet_peer->tablet_metadata()->schema_version()); + reported_tablet->set_schema_version( + tablet_peer->tablet_metadata()->primary_table_schema_version()); tablet_peer->tablet_metadata()->GetTableIdToSchemaVersionMap( reported_tablet->mutable_table_to_version()); From 02a4bac72ef754ab155d67e60677dd935c38fe31 Mon Sep 17 00:00:00 2001 From: Nikhil Chandrappa Date: Thu, 12 Sep 2024 13:16:49 +0000 Subject: [PATCH 15/75] [#23896] Changing the pg-parity flag from --enable_pg_parity_tech_preview to --enable_pg_parity_early_access. Summary: Original commit to change this value was incorrectly resolved during a merge conflict resulting in incorrect flag value. Changing the flag name to --enable_pg_parity_early_access in all the branches. 
Test Plan: ./yb_build.sh --java-test 'org.yb.yugabyted.*' Reviewers: sgarg-yb, djiang Reviewed By: djiang Subscribers: yugabyted-dev Differential Revision: https://phorge.dev.yugabyte.com/D37996 --- bin/yugabyted | 6 +++--- .../test/java/org/yb/yugabyted/TestYugabytedPgParity.java | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/bin/yugabyted b/bin/yugabyted index 9ba8f54f9723..02470522e980 100755 --- a/bin/yugabyted +++ b/bin/yugabyted @@ -7158,9 +7158,9 @@ class ControlScript(object): self.configs.saved_data["ca_cert_file_path"] = os.path.join(args.certs_dir, "ca.crt") - if args.enable_pg_parity_tech_preview: + if args.enable_pg_parity_early_access: self.configs.temp_data["enable_pg_parity"] = \ - args.enable_pg_parity_tech_preview + args.enable_pg_parity_early_access args.background = self.parse_bool(args.background) if args.ui is not None: @@ -7705,7 +7705,7 @@ class ControlScript(object): "--config", help="{} user configuration file path".format( SCRIPT_NAME), metavar="") cur_parser.add_argument( - "--enable_pg_parity_tech_preview", help="Enable PostgreSQL compatibility features." + "--enable_pg_parity_early_access", help="Enable PostgreSQL compatibility features." 
" Default value is False.", action="store_true", default=False) # Hidden commands for development/advanced users diff --git a/java/yb-yugabyted/src/test/java/org/yb/yugabyted/TestYugabytedPgParity.java b/java/yb-yugabyted/src/test/java/org/yb/yugabyted/TestYugabytedPgParity.java index 3e7bd331651f..4913bb31efee 100644 --- a/java/yb-yugabyted/src/test/java/org/yb/yugabyted/TestYugabytedPgParity.java +++ b/java/yb-yugabyted/src/test/java/org/yb/yugabyted/TestYugabytedPgParity.java @@ -38,7 +38,7 @@ public TestYugabytedPgParity() { clusterConfigurations = new ArrayList<>(); Map yugabytedFlags = new HashMap<>(); - yugabytedFlags.put("enable_pg_parity_tech_preview", ""); + yugabytedFlags.put("enable_pg_parity_early_access", ""); for (int i = 0; i < clusterParameters.numNodes; i++) { MiniYugabytedNodeConfigurations nodeConfigurations = From dbddb04d30038b97f58657962f278c342526339f Mon Sep 17 00:00:00 2001 From: Bvsk Patnaik Date: Fri, 13 Sep 2024 14:28:45 -0700 Subject: [PATCH 16/75] [#23814] docs: Add log_dist option to auto_explain page. (#23921) Mention log_dist option that allows users to disable DIST stats when using auto_explain with log_analyze. --------- Co-authored-by: Dwight Hodge --- .../pg-extensions/extension-auto-explain.md | 1 + .../pg-extensions/extension-auto-explain.md | 1 + .../pg-extensions/extension-auto-explain.md | 1 + 3 files changed, 3 insertions(+) diff --git a/docs/content/preview/explore/ysql-language-features/pg-extensions/extension-auto-explain.md b/docs/content/preview/explore/ysql-language-features/pg-extensions/extension-auto-explain.md index e819224426e9..56c9099a07e9 100644 --- a/docs/content/preview/explore/ysql-language-features/pg-extensions/extension-auto-explain.md +++ b/docs/content/preview/explore/ysql-language-features/pg-extensions/extension-auto-explain.md @@ -38,6 +38,7 @@ You can customize the following auto_explain parameters: | `log_format` | The format of the EXPLAIN output. 
Allowed values are `text`, `xml`, `json`, and `yaml`. Only superusers can change this setting. | text | | `log_nested_statements` | Consider nested statements (statements executed inside a function) for logging. When off, only top-level query plans are logged. Only superusers can change this setting. | false | | `sample_rate` | Explain only a set fraction of the statements in each session. The default 1 means explain all the queries. In case of nested statements, either all will be explained or none. Only superusers can change this setting. | 1 | +| `log_dist` | Set to false to disable the [DIST option](../../../../api/ysql/the-sql-language/statements/perf_explain/#dist) of `EXPLAIN ANALYZE`. True by default, equivalent to `EXPLAIN (ANALYZE, DIST)`. This setting only applies when `log_analyze` is true. | true | Note that the default behavior is to do nothing, so you must set at least `auto_explain.log_min_duration` if you want any results. diff --git a/docs/content/stable/explore/ysql-language-features/pg-extensions/extension-auto-explain.md b/docs/content/stable/explore/ysql-language-features/pg-extensions/extension-auto-explain.md index a0888e61a69e..ffa8b3488a9d 100644 --- a/docs/content/stable/explore/ysql-language-features/pg-extensions/extension-auto-explain.md +++ b/docs/content/stable/explore/ysql-language-features/pg-extensions/extension-auto-explain.md @@ -38,6 +38,7 @@ You can customize the following auto_explain parameters: | `log_format` | The format of the EXPLAIN output. Allowed values are `text`, `xml`, `json`, and `yaml`. Only superusers can change this setting. | text | | `log_nested_statements` | Consider nested statements (statements executed inside a function) for logging. When off, only top-level query plans are logged. Only superusers can change this setting. | false | | `sample_rate` | Explain only a set fraction of the statements in each session. The default 1 means explain all the queries. 
In case of nested statements, either all will be explained or none. Only superusers can change this setting. | 1 | +| `log_dist` | Set to false to disable the [DIST option](../../../../api/ysql/the-sql-language/statements/perf_explain/#dist) of `EXPLAIN ANALYZE`. True by default, equivalent to `EXPLAIN (ANALYZE, DIST)`. This setting only applies when `log_analyze` is true. | true | Note that the default behavior is to do nothing, so you must set at least `auto_explain.log_min_duration` if you want any results. diff --git a/docs/content/v2.20/explore/ysql-language-features/pg-extensions/extension-auto-explain.md b/docs/content/v2.20/explore/ysql-language-features/pg-extensions/extension-auto-explain.md index 1e68ac111cc1..1ac8d62d1445 100644 --- a/docs/content/v2.20/explore/ysql-language-features/pg-extensions/extension-auto-explain.md +++ b/docs/content/v2.20/explore/ysql-language-features/pg-extensions/extension-auto-explain.md @@ -38,6 +38,7 @@ You can customize the following auto_explain parameters: | `log_format` | The format of the EXPLAIN output. Allowed values are `text`, `xml`, `json`, and `yaml`. Only superusers can change this setting. | text | | `log_nested_statements` | Consider nested statements (statements executed inside a function) for logging. When off, only top-level query plans are logged. Only superusers can change this setting. | false | | `sample_rate` | Explain only a set fraction of the statements in each session. The default 1 means explain all the queries. In case of nested statements, either all will be explained or none. Only superusers can change this setting. | 1 | +| `log_dist` | Set to false to disable the [DIST option](../../../../api/ysql/the-sql-language/statements/perf_explain/#dist) of `EXPLAIN ANALYZE`. True by default, equivalent to `EXPLAIN (ANALYZE, DIST)`. This setting only applies when `log_analyze` is true. 
| true | Note that the default behavior is to do nothing, so you must set at least `auto_explain.log_min_duration` if you want any results. From 56a9b4980f61b7b8326d5257464d9096537fc7a1 Mon Sep 17 00:00:00 2001 From: jhe Date: Fri, 13 Sep 2024 01:05:20 -0700 Subject: [PATCH 17/75] [#23886] docdb: Fix load balancer leader balancing for geopartitioned tables Summary: Fixing 2 issues with leader balancing. 1. Call SortLeaderLoad() when processing tablets/leaders to move for a table. - This will ensure that we correctly use the full global config as well as any updates from previous tables. 2. Do not return immediately for global leader balancing when we have a global difference of <2. - We are sorted by (is_leader_blacklisted, leader_load, global_leader_load). But since we skip global leader balancing if right is blacklisted, consider (leader_load, global_leader_load) as the sort order. - Also note that we must have leader load_variance of 1 between right and left in order to consider global balancing moves. - If the global_load_variance is under our min(2), then we don't want to do a move since otherwise we may keep bouncing this leader around. Since the next right tservers will have a <= leader_load or global_leader_load, we can stop early here for this left tserver -- other right tservers will either have load_variance = 0 or also have global_load_variance < 2. - However, as opposed to global load balancing, we cannot return early here, and instead must just break. This is because for global load balancing we can always find a tablet to move from the high load TS to the low load TS (assuming proper placements). But for leader balancing, we have the additional constraint that both tservers must have a peer for this tablet. Thus we must continue to the next left tserver.
These fixes are useful in many leader balancing situations where we may have been exiting early, but are most noticeable in geopartitioned clusters with single tablet tables, where one region may not get leader balanced at all due to incorrect global leader sorting. Jira: DB-12791 Test Plan: added a new test that fails before these changes, as it would not be able to follow the leader blacklist ``` ybd --cxx-test load_balancer_mini_cluster-test --gtest_filter "LoadBalancerMiniClusterTest.LeaderMovesWithGeopartitionedTables" ``` Reviewers: asrivastava, zdrudi Reviewed By: zdrudi Subscribers: ybase Differential Revision: https://phorge.dev.yugabyte.com/D37997 --- .../load_balancer_mini_cluster-test.cc | 106 ++++++++++++++++-- src/yb/master/cluster_balance.cc | 31 +++-- src/yb/master/cluster_balance_util.h | 30 ++--- 3 files changed, 123 insertions(+), 44 deletions(-) diff --git a/src/yb/integration-tests/load_balancer_mini_cluster-test.cc b/src/yb/integration-tests/load_balancer_mini_cluster-test.cc index 29c2b60bda03..81d0a90c8b91 100644 --- a/src/yb/integration-tests/load_balancer_mini_cluster-test.cc +++ b/src/yb/integration-tests/load_balancer_mini_cluster-test.cc @@ -15,8 +15,7 @@ #include "yb/client/client.h" -#include "yb/consensus/consensus.pb.h" -#include "yb/consensus/consensus.proxy.h" +#include "yb/client/table_creator.h" #include "yb/gutil/dynamic_annotations.h" @@ -24,7 +23,6 @@ #include "yb/integration-tests/mini_cluster.h" #include "yb/integration-tests/yb_table_test_base.h" -#include "yb/master/catalog_entity_info.pb.h" #include "yb/master/cluster_balance.h" #include "yb/master/master.h" @@ -50,6 +48,7 @@ METRIC_DECLARE_gauge_uint32(total_table_load_difference); DECLARE_int32(catalog_manager_bg_task_wait_ms); DECLARE_bool(enable_load_balancing); +DECLARE_string(instance_uuid_override); DECLARE_bool(load_balancer_drive_aware); DECLARE_int32(load_balancer_max_concurrent_moves); DECLARE_int32(replication_factor); @@ -119,17 +118,23 @@ void 
WaitForReplicaOnTS(yb::MiniCluster* mini_cluster, }, kDefaultTimeout, "WaitForAddTaskToBeProcessed")); } -void WaitLoadBalancerActive(client::YBClient* client) { - ASSERT_OK(WaitFor([&]() -> Result { - bool is_idle = VERIFY_RESULT(client->IsLoadBalancerIdle()); - return !is_idle; - }, kDefaultTimeout, "IsLoadBalancerActive")); +void WaitLoadBalancerActive( + client::YBClient* client, const std::string& msg = "IsLoadBalancerActive", + const std::chrono::milliseconds timeout = kDefaultTimeout) { + ASSERT_OK(WaitFor( + [&]() -> Result { + bool is_idle = VERIFY_RESULT(client->IsLoadBalancerIdle()); + return !is_idle; + }, + timeout * kTimeMultiplier, msg)); } -void WaitLoadBalancerIdle(client::YBClient* client) { - ASSERT_OK(WaitFor([&]() -> Result { - return client->IsLoadBalancerIdle(); - }, kDefaultTimeout, "IsLoadBalancerIdle")); +void WaitLoadBalancerIdle( + client::YBClient* client, const std::string& msg = "IsLoadBalancerIdle", + const std::chrono::milliseconds timeout = kDefaultTimeout) { + ASSERT_OK(WaitFor( + [&]() -> Result { return client->IsLoadBalancerIdle(); }, timeout * kTimeMultiplier, + msg)); } typedef std::unordered_mapModifyPlacementInfo( + "cloud1.rack1.zone,cloud1.rack2.zone,cloud2.rack3.zone", 3, "")); + + // Add new set of tservers. + for (const auto& [cloud, rack, uuid] : { + std::tuple("cloud2", "rack4", "fffffffffffffffffffffffffffffffc"), + std::tuple("cloud3", "rack5", "fffffffffffffffffffffffffffffffd"), + std::tuple("cloud3", "rack6", "fffffffffffffffffffffffffffffffe"), + }) { + // Set large instance uuids to make sure these tservers are sorted last when breaking ties. 
+ ANNOTATE_UNPROTECTED_WRITE(FLAGS_instance_uuid_override) = uuid; + tserver::TabletServerOptions extra_opts = + ASSERT_RESULT(tserver::TabletServerOptions::CreateTabletServerOptions()); + extra_opts.SetPlacement(cloud, rack, "zone"); + ASSERT_OK(mini_cluster()->AddTabletServer(extra_opts)); + } + ASSERT_OK(mini_cluster()->WaitForTabletServerCount(num_tablet_servers() + 3)); + + // Create two new tables. + for (int i = 2; i <= 3; ++i) { + client::YBTableName tn( + YQL_DATABASE_CQL, table_name().namespace_name(), "kv-table-test-" + std::to_string(i)); + client::YBSchemaBuilder b; + b.AddColumn("k")->Type(DataType::BINARY)->NotNull()->HashPrimaryKey(); + b.AddColumn("v")->Type(DataType::BINARY)->NotNull(); + ASSERT_OK(b.Build(&schema_)); + + ASSERT_OK(NewTableCreator()->table_name(tn).schema(&schema_).Create()); + } + + // Get the same order of tables as the load balancer processes them in. + auto& catalog_manager = ASSERT_RESULT(mini_cluster()->GetLeaderMiniMaster())->catalog_manager(); + auto all_tables = + catalog_manager.GetTables(master::GetTablesMode::kAll, master::PrimaryTablesOnly::kTrue); + + // Select the first table to be processed by the load balancer, and move it to the other region. + client::YBTableName first_table_name( + YQLDatabase::YQL_DATABASE_CQL, all_tables[0]->namespace_name(), all_tables[0]->name()); + ASSERT_OK(yb_admin_client_->ModifyTablePlacementInfo( + first_table_name, "cloud2.rack4.zone,cloud3.rack5.zone,cloud3.rack6.zone", 3, "")); + + // Wait for the load balancer to finish. + WaitLoadBalancerActive( + client_.get(), "Waiting for LB to begin after changing placement of first table"); + // Full move of a table may take a bit longer, so wait longer for idle. 
+ WaitLoadBalancerIdle( + client_.get(), "Waiting for load to settle after changing placement of first table", + kDefaultTimeout * 2); + + auto leader_counts = ASSERT_RESULT(yb_admin_client_->GetLeaderCounts(first_table_name)); + LOG(INFO) << "Leader counts before blacklist: " << ToString(leader_counts); + + // Now leader blacklist one of the new tservers. Ensure that the load balancer is able to move + // leaders off of it. + ASSERT_OK(AddTserverToBlacklist(4, true /* leader_blacklist */)); + WaitLoadBalancerActive(client_.get(), "Waiting for LB to begin after blacklisting tserver"); + WaitLoadBalancerIdle(client_.get(), "Waiting for load to settle after blacklisting tserver"); + + // Assert that the leaders have been moved off of the leader blacklisted tserver. + leader_counts = ASSERT_RESULT(yb_admin_client_->GetLeaderCounts(first_table_name)); + LOG(INFO) << "Leader counts after blacklist: " << ToString(leader_counts); + ASSERT_EQ(leader_counts.at(mini_cluster_->mini_tablet_server(4)->server()->permanent_uuid()), 0); + + // Finally, remove the tserver from the blacklist. + ASSERT_OK(RemoveTserverFromBlacklist(4, true /* leader_blacklist */)); + WaitLoadBalancerActive(client_.get(), "Waiting for LB to begin after unblacklisting tserver"); + WaitLoadBalancerIdle(client_.get(), "Waiting for load to settle after unblacklisting tserver"); + + // Ensure that even though we picked the first table to move, the load balancer is able to move + // leaders based on the correct overall global load, and not just the global load at the time of + // AnalyzeTablets. 
+ leader_counts = ASSERT_RESULT(yb_admin_client_->GetLeaderCounts(first_table_name)); + LOG(INFO) << "Leader counts after unblacklist: " << ToString(leader_counts); + ASSERT_GT(leader_counts.at(mini_cluster_->mini_tablet_server(4)->server()->permanent_uuid()), 0); +} class LoadBalancerFailedDrive : public LoadBalancerMiniClusterTestBase { protected: diff --git a/src/yb/master/cluster_balance.cc b/src/yb/master/cluster_balance.cc index 48708f20b7ff..905d5369aabb 100644 --- a/src/yb/master/cluster_balance.cc +++ b/src/yb/master/cluster_balance.cc @@ -557,11 +557,12 @@ void ClusterLoadBalancer::RunLoadBalancerWithOptions(Options* options) { // We may have modified global loads, so we need to reset this state's load. state_->SortLoad(); + state_->SortLeaderLoad(); VLOG(2) << "Per table state for table: " << table->id() << ", " << state_->ToString(); VLOG(2) << "Global state: " << global_state_->ToString(); VLOG(2) << "Sorted load: " << table->id() << ", " << GetSortedLoad(); - VLOG(2) << "Global load: " << table->id() << ", " << GetSortedLeaderLoad(); + VLOG(2) << "Sorted leader load: " << table->id() << ", " << GetSortedLeaderLoad(); // Output parameters are unused in the load balancer, but useful in testing. TabletId out_tablet_id; @@ -1070,7 +1071,7 @@ Result ClusterLoadBalancer::GetLoadToMove( if (load_variance > 0 && CanBalanceGlobalLoad()) { int global_load_variance = global_state_->GetGlobalLoad(high_load_uuid) - global_state_->GetGlobalLoad(low_load_uuid); - if (global_load_variance < state_->options_->kMinGlobalLoadVarianceToBalance) { + if (global_load_variance < state_->options_->kMinLoadVarianceToBalance /* 2 */) { // Already globally balanced. Since we are sorted by global load, we can return here as // there are no other moves for us to make. return false; @@ -1309,13 +1310,14 @@ Result ClusterLoadBalancer::GetLeaderToMove( bool is_global_balancing_move = false; // Check for state change or end conditions. 
- if (left == right || (load_variance < state_->options_->kMinLeaderLoadVarianceToBalance && - !high_leader_blacklisted)) { + if (high_leader_blacklisted && state_->GetLeaderLoad(high_load_uuid) == 0) { + continue; // No leaders to move from this blacklisted TS. + } + if (left == right || (load_variance < state_->options_->kMinLoadVarianceToBalance /* 2 */ && + !high_leader_blacklisted)) { // Global leader balancing only if per table variance is > 0. - // If both left and right are same (i.e. load_variance is 0) and right is last_pos - // or right is last_pos and load_variance is 0 then we can return as we don't - // have any other moves to make. if (load_variance == 0 && right == last_pos) { + // We can return as we don't have any other moves to make. return false; } // Check if we can benefit from global leader balancing. @@ -1323,10 +1325,15 @@ Result ClusterLoadBalancer::GetLeaderToMove( if (load_variance > 0 && CanBalanceGlobalLoad()) { int global_load_variance = state_->global_state_->GetGlobalLeaderLoad(high_load_uuid) - state_->global_state_->GetGlobalLeaderLoad(low_load_uuid); - // Already globally balanced. Since we are sorted by global load, we can return here as - // there are no other moves for us to make. - if (global_load_variance < state_->options_->kMinGlobalLeaderLoadVarianceToBalance) { - return false; + // Already globally balanced. Since we are sorted by (leaders, global leader load), we can + // break here as there are no other leaders for us to move to this left tserver. + // However, as opposed to global load balancing, we cannot return early here, and instead + // must just break. This is because for global load balancing we can always find a tablet + // to move from the high load TS to the low load TS (assuming proper placements). But for + // leader balancing, we have the additional constraint that both tservers must have a peer + // for this tablet. Thus we must continue to the next left tserver. 
+ if (global_load_variance < state_->options_->kMinLoadVarianceToBalance /* 2 */) { + break; } VLOG(3) << "This is a global leader balancing pass"; is_global_balancing_move = true; @@ -1369,7 +1376,7 @@ Result ClusterLoadBalancer::GetLeaderToMove( } // Leader movement solely due to leader blacklist. - if (load_variance < state_->options_->kMinLeaderLoadVarianceToBalance && + if (load_variance < state_->options_->kMinLoadVarianceToBalance /* 2 */ && high_leader_blacklisted) { LOG(INFO) << "Move tablet " << tablet.first << " leader from leader blacklisted TS " << *from_ts << " to TS " << *to_ts; diff --git a/src/yb/master/cluster_balance_util.h b/src/yb/master/cluster_balance_util.h index 638a6c5916b0..88c038fed612 100644 --- a/src/yb/master/cluster_balance_util.h +++ b/src/yb/master/cluster_balance_util.h @@ -177,17 +177,14 @@ struct Options { virtual ~Options() {} std::string ToString() { - std::string out = - Format("{ MinLoadVarianceToBalance: $0, MinGlobalLoadVarianceToBalance: $1, " - "MinLeaderLoadVarianceToBalance: $2, MinGlobalLeaderLoadVarianceToBalance: $3, " - "AllowLimitStartingTablets: $4, MaxTabletRemoteBootstraps: $5, " - "MaxTabletRemoteBootstrapsPerTable: $6, AllowLimitOverReplicatedTablets: $7, " - "MaxOverReplicatedTablets: $8, MaxConcurrentRemovals: $9, ", - kMinLoadVarianceToBalance, kMinGlobalLoadVarianceToBalance, - kMinLeaderLoadVarianceToBalance, kMinGlobalLeaderLoadVarianceToBalance, - kAllowLimitStartingTablets, kMaxTabletRemoteBootstraps, - kMaxTabletRemoteBootstrapsPerTable, kAllowLimitOverReplicatedTablets, - kMaxOverReplicatedTablets, kMaxConcurrentRemovals); + std::string out = Format( + "{ MinLoadVarianceToBalance: $0, AllowLimitStartingTablets: $1, " + "MaxTabletRemoteBootstraps: $2, MaxTabletRemoteBootstrapsPerTable: $3, " + "AllowLimitOverReplicatedTablets: $4, MaxOverReplicatedTablets: $5, " + "MaxConcurrentRemovals: $6, ", + kMinLoadVarianceToBalance, kAllowLimitStartingTablets, kMaxTabletRemoteBootstraps, + 
kMaxTabletRemoteBootstrapsPerTable, kAllowLimitOverReplicatedTablets, + kMaxOverReplicatedTablets, kMaxConcurrentRemovals); out += Format("MaxConcurrentAdds: $0, MaxConcurrentLeaderMoves: $1, " "MaxConcurrentLeaderMovesPerTable: $2, ReplicaType: $3, " @@ -198,17 +195,10 @@ struct Options { } // If variance between load on TS goes past this number, we should try to balance. + // Don't balance load variance lower than this else we will repeatedly bounce the same peer back + // and forth. double kMinLoadVarianceToBalance = 2.0; - // If variance between global load on TS goes past this number, we should try to balance. - double kMinGlobalLoadVarianceToBalance = 2.0; - - // If variance between leader load on TS goes past this number, we should try to balance. - double kMinLeaderLoadVarianceToBalance = 2.0; - - // If variance between global leader load on TS goes past this number, we should try to balance. - double kMinGlobalLeaderLoadVarianceToBalance = 2.0; - // Whether to limit the number of tablets being spun up on the cluster at any given time. 
bool kAllowLimitStartingTablets = true; From 48112fb0a625a1351a50231853b1575f784f6f0a Mon Sep 17 00:00:00 2001 From: Dwight Hodge <79169168+ddhodge@users.noreply.github.com> Date: Fri, 13 Sep 2024 21:12:53 -0400 Subject: [PATCH 18/75] [doc][yba] Warning for disk encryption agent (#23850) * Warning for disk encryption agent * edits * edits * Update docs/content/preview/deploy/manual-deployment/start-tservers.md * Update docs/content/preview/deploy/manual-deployment/start-masters.md Co-authored-by: Aishwarya Chakravarthy * review comment * edits --------- Co-authored-by: Aishwarya Chakravarthy --- .../manual-deployment/install-software.md | 6 +++++ .../deploy/manual-deployment/start-masters.md | 19 ++++++++-------- .../manual-deployment/start-tservers.md | 22 +++++++------------ .../prepare/server-nodes-software/_index.md | 6 +++++ .../manual-deployment/install-software.md | 6 +++++ .../deploy/manual-deployment/start-masters.md | 19 ++++++++-------- .../manual-deployment/start-tservers.md | 22 +++++++------------ .../prepare/server-nodes-software/_index.md | 6 +++++ 8 files changed, 60 insertions(+), 46 deletions(-) diff --git a/docs/content/preview/deploy/manual-deployment/install-software.md b/docs/content/preview/deploy/manual-deployment/install-software.md index 4a2a793fe272..eed52aa537e8 100644 --- a/docs/content/preview/deploy/manual-deployment/install-software.md +++ b/docs/content/preview/deploy/manual-deployment/install-software.md @@ -17,6 +17,12 @@ Installing YugabyteDB involves completing prerequisites and downloading the Yuga {{% readfile "/preview/quick-start/include-prerequisites-linux.md" %}} +### Using disk encryption software with YugabyteDB + +If you are using third party disk encryption software, such as Vormetric or CipherTrust, the disk encryption service must be up and running on the node before starting any YugabyteDB services. 
If YugabyteDB processes start _before_ the encryption service, restarting an already encrypted node can result in data corruption. + +To avoid issues, stop YugabyteDB services on the node _before_ enabling or disabling the disk encryption service. + ## Download YugabyteDB YugabyteDB supports both x86 and ARM (aarch64) CPU architectures. Download packages ending in `x86_64.tar.gz` to run on x86, and packages ending in `aarch64.tar.gz` to run on ARM. diff --git a/docs/content/preview/deploy/manual-deployment/start-masters.md b/docs/content/preview/deploy/manual-deployment/start-masters.md index 29270d9a9840..e934818b09f0 100644 --- a/docs/content/preview/deploy/manual-deployment/start-masters.md +++ b/docs/content/preview/deploy/manual-deployment/start-masters.md @@ -11,16 +11,11 @@ menu: type: docs --- -{{< note title="Note" >}} - -- The number of nodes in a cluster running YB-Masters **must** equal the replication factor. -- The number of comma-separated addresses present in `master_addresses` should also equal the replication factor. -- For running a single cluster across multiple data centers or 2 clusters in 2 data centers, refer to the [Multi-DC deployments](../../../deploy/multi-dc/) section. -- Read more about the [yb-master service architecture](../../../architecture/yb-master/). +This section covers deployment for a single region or data center in a multi-zone/multi-rack configuration. Note that single zone configuration is a special case of multi-zone where all placement-related flags are set to the same value across every node. -{{< /note >}} +For instructions on running a single cluster across multiple data centers or 2 clusters in 2 data centers, refer to [Multi-DC deployments](../../../deploy/multi-dc/). -This section covers deployment for a single region or data center in a multi-zone/multi-rack configuration. 
Note that single zone configuration is a special case of multi-zone where all placement-related flags are set to the same value across every node. +For information about YB-Master, refer to [YB-Master service](../../../architecture/yb-master/). ## Example scenario @@ -32,7 +27,9 @@ This section covers deployment for a single region or data center in a multi-zon ## Run YB-Master servers with command line flags -Run the yb-master server on each of the three nodes as follows. Note how multiple directories can be provided to the [`--fs_data_dirs`](../../../reference/configuration/yb-master/#fs-data-dirs) flag. Replace the [`--rpc_bind_addresses`](../../../reference/configuration/yb-master/#rpc-bind-addresses) value with the private IP address of the host as well as the set the `placement_cloud`,`placement_region` and `placement_zone` values appropriately. For single zone deployment, use the same value for the `placement_zone` flag. +The number of nodes in a cluster running YB-Masters must equal the replication factor. + +Run the yb-master server on each of the three nodes as follows. ```sh $ ./bin/yb-master \ @@ -45,6 +42,10 @@ $ ./bin/yb-master \ >& /home/centos/disk1/yb-master.out & ``` +The number of comma-separated addresses in `--master_addresses` should equal the replication factor. + +You can specify multiple directories using the [`--fs_data_dirs`](../../../reference/configuration/yb-master/#fs-data-dirs) flag. Replace the [`--rpc_bind_addresses`](../../../reference/configuration/yb-master/#rpc-bind-addresses) value with the private IP address of the host, and set the `placement_cloud`, `placement_region`, and `placement_zone` values appropriately. For single zone deployment, use the same value for the `placement_zone` flag. + For the full list of configuration flags, see the [YB-Master reference](../../../reference/configuration/yb-master/). 
## Run YB-Master servers with configuration file diff --git a/docs/content/preview/deploy/manual-deployment/start-tservers.md b/docs/content/preview/deploy/manual-deployment/start-tservers.md index 3d6ddd2dee58..4e01aab9f0e9 100644 --- a/docs/content/preview/deploy/manual-deployment/start-tservers.md +++ b/docs/content/preview/deploy/manual-deployment/start-tservers.md @@ -11,15 +11,11 @@ menu: type: docs --- -{{< note title="Note" >}} - -- The number of nodes in a cluster running YB-TServers **must** equal or exceed the replication factor in order for any table to get created successfully. -- For running a single cluster across multiple data centers or 2 clusters in 2 data centers, refer to the [Multi-DC Deployments](../../../deploy/multi-dc/) section. -- Read more about the [yb-tserver service architecture](../../../architecture/yb-tserver/). +This section covers deployment for a single region or data center in a multi-zone/multi-rack configuration. Note that single zone configuration is a special case of multi-zone where all placement related flags are set to the same value across every node. -{{< /note >}} +For instructions on running a single cluster across multiple data centers or 2 clusters in 2 data centers, refer to [Multi-DC deployments](../../../deploy/multi-dc/). -This section covers deployment for a single region or data center in a multi-zone/multi-rack configuration. Note that single zone configuration is a special case of multi-zone where all placement related flags are set to the same value across every node. +For information about YB-TServer, refer to [YB-TServer service](../../../architecture/yb-tserver/). ## Example scenario @@ -32,7 +28,9 @@ This section covers deployment for a single region or data center in a multi-zon ## Run YB-TServer with command line flags -Run the yb-tserver server on each of the six nodes as follows. Note that all of the master addresses have to be provided using the `--tserver_master_addrs` flag. 
Replace the [`--rpc_bind_addresses`](../../../reference/configuration/yb-tserver/#rpc-bind-addresses) value with the private IP address of the host, and set the `placement_cloud`, `placement_region`, and `placement_zone` values appropriately. For single zone deployment, use the same value for the `--placement_zone` flag. +The number of nodes in a cluster running YB-TServers must equal or exceed the replication factor in order for any table to get created successfully. + +Run the yb-tserver server on each of the six nodes as follows. ```sh $ ./bin/yb-tserver \ @@ -48,13 +46,9 @@ $ ./bin/yb-tserver \ >& /home/centos/disk1/yb-tserver.out & ``` -For the full list of configuration flags, see the [YB-TServer reference](../../../reference/configuration/yb-tserver/). - -{{< note title="Note" >}} +Provide all of the master addresses using the [`--tserver_master_addrs`](../../../reference/configuration/yb-tserver/#tserver-master-addrs) flag. Replace the [`--rpc_bind_addresses`](../../../reference/configuration/yb-tserver/#rpc-bind-addresses) value with the private IP address of the host, and set the `placement_cloud`, `placement_region`, and `placement_zone` values appropriately. For single zone deployment, use the same value for the `--placement_zone` flag. -The number of comma-separated values in the [`--tserver_master_addrs`](../../../reference/configuration/yb-tserver/#tserver-master-addrs) flag should match the total number of YB-Master servers (that is, the replication factor). - -{{< /note >}} +For the full list of configuration flags, see the [YB-TServer reference](../../../reference/configuration/yb-tserver/). 
## Run YB-TServer with configuration file diff --git a/docs/content/preview/yugabyte-platform/prepare/server-nodes-software/_index.md b/docs/content/preview/yugabyte-platform/prepare/server-nodes-software/_index.md index ef481be466f1..79af7090e5de 100644 --- a/docs/content/preview/yugabyte-platform/prepare/server-nodes-software/_index.md +++ b/docs/content/preview/yugabyte-platform/prepare/server-nodes-software/_index.md @@ -19,6 +19,12 @@ The Linux OS and other software components on each database cluster node must me Depending on the [provider type](../../yba-overview/#provider-configurations) and permissions you grant, you may have to install all of these requirements manually, or YugabyteDB Anywhere (YBA) will install it all automatically. +{{< warning title="Using disk encryption software with YugabyteDB" >}} +If you are using third party disk encryption software, such as Vormetric or CipherTrust, the disk encryption service must be up and running on the node before starting any YugabyteDB services. If YugabyteDB processes start _before_ the encryption service, restarting an already encrypted node can result in data corruption. + +To avoid problems, [pause the universe](../../../manage-deployments/delete-universe/#pause-a-universe) _before_ enabling or disabling the disk encryption service on universe nodes. +{{< /warning >}} + ##### Linux OS YBA supports deploying YugabyteDB on a variety of [operating systems](../../../reference/configuration/operating-systems/). 
diff --git a/docs/content/stable/deploy/manual-deployment/install-software.md b/docs/content/stable/deploy/manual-deployment/install-software.md index 70a6f69a61aa..49ded970a632 100644 --- a/docs/content/stable/deploy/manual-deployment/install-software.md +++ b/docs/content/stable/deploy/manual-deployment/install-software.md @@ -17,6 +17,12 @@ Installing YugabyteDB involves completing prerequisites and downloading the Yuga {{% readfile "/preview/quick-start/include-prerequisites-linux.md" %}} +### Using disk encryption software with YugabyteDB + +If you are using third party disk encryption software, such as Vormetric or CipherTrust, the disk encryption service must be up and running on the node before starting any YugabyteDB services. If YugabyteDB processes start _before_ the encryption service, restarting an already encrypted node can result in data corruption. + +To avoid issues, stop YugabyteDB services on the node _before_ enabling or disabling the disk encryption service. + ## Download YugabyteDB YugabyteDB supports both x86 and ARM (aarch64) CPU architectures. Download packages ending in `x86_64.tar.gz` to run on x86, and packages ending in `aarch64.tar.gz` to run on ARM. diff --git a/docs/content/stable/deploy/manual-deployment/start-masters.md b/docs/content/stable/deploy/manual-deployment/start-masters.md index 3f245c824579..cf778504ae14 100644 --- a/docs/content/stable/deploy/manual-deployment/start-masters.md +++ b/docs/content/stable/deploy/manual-deployment/start-masters.md @@ -11,16 +11,11 @@ menu: type: docs --- -{{< note title="Note" >}} - -- The number of nodes in a cluster running YB-Masters **must** equal the replication factor. -- The number of comma-separated addresses present in `master_addresses` should also equal the replication factor. -- For running a single cluster across multiple data centers or 2 clusters in 2 data centers, refer to the [Multi-DC deployments](../../../deploy/multi-dc/) section. 
-- Read more about the [yb-master service architecture](../../../architecture/yb-master/). +This section covers deployment for a single region or data center in a multi-zone/multi-rack configuration. Note that single zone configuration is a special case of multi-zone where all placement-related flags are set to the same value across every node. -{{< /note >}} +For instructions on running a single cluster across multiple data centers or 2 clusters in 2 data centers, refer to [Multi-DC deployments](../../../deploy/multi-dc/). -This section covers deployment for a single region or data center in a multi-zone/multi-rack configuration. Note that single zone configuration is a special case of multi-zone where all placement-related flags are set to the same value across every node. +For information about YB-Master, refer to [YB-Master service](../../../architecture/yb-master/). ## Example scenario @@ -32,7 +27,9 @@ This section covers deployment for a single region or data center in a multi-zon ## Run YB-Master servers with command line flags -Run the `yb-master` server on each of the three nodes as shown below. Note how multiple directories can be provided to the [`--fs_data_dirs`](../../../reference/configuration/yb-master/#fs-data-dirs) flag. Replace the [`--rpc_bind_addresses`](../../../reference/configuration/yb-master/#rpc-bind-addresses) value with the private IP address of the host as well as the set the `placement_cloud`,`placement_region` and `placement_zone` values appropriately. For single zone deployment, use the same value for the `placement_zone` flag. +The number of nodes in a cluster running YB-Masters must equal the replication factor. + +Run the yb-master server on each of the three nodes as follows. ```sh $ ./bin/yb-master \ @@ -45,6 +42,10 @@ $ ./bin/yb-master \ >& /home/centos/disk1/yb-master.out & ``` +The number of comma-separated addresses in `--master_addresses` should equal the replication factor. 
+ +You can specify multiple directories using the [`--fs_data_dirs`](../../../reference/configuration/yb-master/#fs-data-dirs) flag. Replace the [`--rpc_bind_addresses`](../../../reference/configuration/yb-master/#rpc-bind-addresses) value with the private IP address of the host, and set the `placement_cloud`, `placement_region`, and `placement_zone` values appropriately. For single zone deployment, use the same value for the `placement_zone` flag. + For the full list of configuration flags, see the [YB-Master reference](../../../reference/configuration/yb-master/). ## Run YB-Master servers with configuration file diff --git a/docs/content/stable/deploy/manual-deployment/start-tservers.md b/docs/content/stable/deploy/manual-deployment/start-tservers.md index ebb3b6321d57..bc36405a2788 100644 --- a/docs/content/stable/deploy/manual-deployment/start-tservers.md +++ b/docs/content/stable/deploy/manual-deployment/start-tservers.md @@ -11,15 +11,11 @@ menu: type: docs --- -{{< note title="Note" >}} - -- The number of nodes in a cluster running YB-TServers **must** equal or exceed the replication factor in order for any table to get created successfully. -- For running a single cluster across multiple data centers or 2 clusters in 2 data centers, refer to the [Multi-DC Deployments](../../../deploy/multi-dc/) section. -- Read more about the [yb-tserver service architecture](../../../architecture/yb-tserver/). +This section covers deployment for a single region or data center in a multi-zone/multi-rack configuration. Note that single zone configuration is a special case of multi-zone where all placement related flags are set to the same value across every node. -{{< /note >}} +For instructions on running a single cluster across multiple data centers or 2 clusters in 2 data centers, refer to [Multi-DC deployments](../../../deploy/multi-dc/). -This section covers deployment for a single region or data center in a multi-zone/multi-rack configuration. 
Note that single zone configuration is a special case of multi-zone where all placement related flags are set to the same value across every node. +For information about YB-TServer, refer to [YB-TServer service](../../../architecture/yb-tserver/). ## Example scenario @@ -32,7 +28,9 @@ This section covers deployment for a single region or data center in a multi-zon ## Run YB-TServer with command line flags -Run the `yb-tserver` server on each of the six nodes as follows. Note that all of the master addresses have to be provided using the `--tserver_master_addrs` flag. Replace the [`--rpc_bind_addresses`](../../../reference/configuration/yb-tserver/#rpc-bind-addresses) value with the private IP address of the host, and set the `placement_cloud`, `placement_region`, and `placement_zone` values appropriately. For single zone deployment, use the same value for the `--placement_zone` flag. +The number of nodes in a cluster running YB-TServers must equal or exceed the replication factor in order for any table to get created successfully. + +Run the yb-tserver server on each of the six nodes as follows. ```sh $ ./bin/yb-tserver \ @@ -48,13 +46,9 @@ $ ./bin/yb-tserver \ >& /home/centos/disk1/yb-tserver.out & ``` -For the full list of configuration flags, see the [YB-TServer reference](../../../reference/configuration/yb-tserver/). - -{{< note title="Note" >}} +Provide all of the master addresses using the [`--tserver_master_addrs`](../../../reference/configuration/yb-tserver/#tserver-master-addrs) flag. Replace the [`--rpc_bind_addresses`](../../../reference/configuration/yb-tserver/#rpc-bind-addresses) value with the private IP address of the host, and set the `placement_cloud`, `placement_region`, and `placement_zone` values appropriately. For single zone deployment, use the same value for the `--placement_zone` flag. 
-The number of comma-separated values in the [`--tserver_master_addrs`](../../../reference/configuration/yb-tserver/#tserver-master-addrs) flag should match the total number of YB-Master servers (that is, the replication factor). - -{{< /note >}} +For the full list of configuration flags, see the [YB-TServer reference](../../../reference/configuration/yb-tserver/). ## Run YB-TServer with configuration file diff --git a/docs/content/stable/yugabyte-platform/prepare/server-nodes-software/_index.md b/docs/content/stable/yugabyte-platform/prepare/server-nodes-software/_index.md index e4d8eaa8a387..5cf15c308b58 100644 --- a/docs/content/stable/yugabyte-platform/prepare/server-nodes-software/_index.md +++ b/docs/content/stable/yugabyte-platform/prepare/server-nodes-software/_index.md @@ -16,6 +16,12 @@ The Linux OS and other software components on each database cluster node must me Depending on the [provider type](../../yba-overview/#provider-configurations) and permissions you grant, you may have to install all of these requirements manually, or YugabyteDB Anywhere (YBA) will install it all automatically. +{{< warning title="Using disk encryption software with YugabyteDB" >}} +If you are using third party disk encryption software, such as Vormetric or CipherTrust, the disk encryption service must be up and running on the node before starting any YugabyteDB services. If YugabyteDB processes start _before_ the encryption service, restarting an already encrypted node can result in data corruption. + +To avoid problems, [pause the universe](../../../manage-deployments/delete-universe/#pause-a-universe) _before_ enabling or disabling the disk encryption service on universe nodes. +{{< /warning >}} + ##### Linux OS YBA supports deploying YugabyteDB on a variety of [operating systems](../../../reference/configuration/operating-systems/). 
From 06baaf0297e12ab950e41769ab2425fe82f0ae18 Mon Sep 17 00:00:00 2001 From: Utkarsh Munjal Date: Fri, 13 Sep 2024 12:53:35 +0530 Subject: [PATCH 19/75] [#23314] YSQL: Prevent moving a single colocated table to a different tablespace Summary: Previously, the `ALTER TABLE SET TABLESPACE ` command allowed changing the tablespace of a single colocated table, which should not be possible. - Colocated tables cannot be moved individually since de-colocation is not supported. - This fix ensures that attempting to move a single colocated table will now result in an error. - Instead, to move all tables within a tablespace, use: ```ALTER TABLE ALL IN TABLESPACE SET TABLESPACE CASCADE``` - The fix enforces this by setting `yb_cascade` in `AlterTableCmd` to true for the movement of all colocated tables. If `yb_cascade` is not true and a single colocated table is being moved, an error is thrown. JIRA: DB-12239 Test Plan: == Automated Testing == - ./yb_build.sh --java-test org.yb.pgsql.TestPgRegressColocatedTablesWithTablespaces#testPgRegressColocatedTablesWithTablespaces In the above test added a scenario, where we try to move a single colocated table in a tablespace, and ensures that it throws an error. 
Reviewers: skumar, yguan, aagrawal Reviewed By: yguan Subscribers: jason, yql Differential Revision: https://phorge.dev.yugabyte.com/D37055 --- src/postgres/src/backend/commands/tablecmds.c | 21 ++++++++++++++++--- src/postgres/src/backend/nodes/copyfuncs.c | 1 + src/postgres/src/backend/parser/gram.y | 1 + src/postgres/src/include/nodes/parsenodes.h | 2 ++ .../yb_colocated_tables_with_tablespaces.out | 8 +++++++ .../yb_colocated_tables_with_tablespaces.sql | 7 +++++++ 6 files changed, 37 insertions(+), 3 deletions(-) diff --git a/src/postgres/src/backend/commands/tablecmds.c b/src/postgres/src/backend/commands/tablecmds.c index 990c434ee174..ef621d9d442f 100644 --- a/src/postgres/src/backend/commands/tablecmds.c +++ b/src/postgres/src/backend/commands/tablecmds.c @@ -490,7 +490,7 @@ static ObjectAddress ATExecClusterOn(Relation rel, const char *indexName, static void ATExecDropCluster(Relation rel, LOCKMODE lockmode); static bool ATPrepChangePersistence(Relation rel, bool toLogged); static void ATPrepSetTableSpace(AlteredTableInfo *tab, Relation rel, - const char *tablespacename, LOCKMODE lockmode); + const char *tablespacename, LOCKMODE lockmode, bool cascade); static void ATExecSetTableSpace(Oid tableOid, Oid newTableSpace, LOCKMODE lockmode); static void ATExecSetTableSpaceNoStorage(Relation rel, Oid newTableSpace); static void ATExecSetRelOptions(Relation rel, List *defList, @@ -4240,7 +4240,7 @@ ATPrepCmd(List **wqueue, Relation rel, AlterTableCmd *cmd, ATSimplePermissions(rel, ATT_TABLE | ATT_MATVIEW | ATT_INDEX | ATT_PARTITIONED_INDEX); /* This command never recurses */ - ATPrepSetTableSpace(tab, rel, cmd->name, lockmode); + ATPrepSetTableSpace(tab, rel, cmd->name, lockmode, cmd->yb_cascade); pass = AT_PASS_MISC; /* doesn't actually matter */ break; case AT_SetRelOptions: /* SET (...) 
*/ @@ -12124,7 +12124,8 @@ ATExecDropCluster(Relation rel, LOCKMODE lockmode) * ALTER TABLE SET TABLESPACE */ static void -ATPrepSetTableSpace(AlteredTableInfo *tab, Relation rel, const char *tablespacename, LOCKMODE lockmode) +ATPrepSetTableSpace(AlteredTableInfo *tab, Relation rel, + const char *tablespacename, LOCKMODE lockmode, bool cascade) { Oid tablespaceId; @@ -12152,6 +12153,19 @@ ATPrepSetTableSpace(AlteredTableInfo *tab, Relation rel, const char *tablespacen errmsg("cannot set tablespace for primary key index"))); } + if (IsYugaByteEnabled() && !cascade && MyDatabaseColocated && + YbGetTableProperties(rel)->is_colocated) + { + /* + * Cannot move one colocated relation alone + * Use Alter TABLE ALL IN TABLESPACE SET TABLESPACE CASCADE; + */ + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("cannot move one colocated relation alone"), + errhint("Use ALTER ... ALL ... CASCADE to move all colocated relations."))); + } + /* Check that the tablespace exists */ tablespaceId = get_tablespace_oid(tablespacename, false); @@ -12934,6 +12948,7 @@ AlterTableMoveAll(AlterTableMoveAllStmt *stmt) cmd->subtype = AT_SetTableSpace; cmd->name = stmt->new_tablespacename; + cmd->yb_cascade = true; cmds = lappend(cmds, cmd); diff --git a/src/postgres/src/backend/nodes/copyfuncs.c b/src/postgres/src/backend/nodes/copyfuncs.c index a36610a98b05..3f0f1b595fc3 100644 --- a/src/postgres/src/backend/nodes/copyfuncs.c +++ b/src/postgres/src/backend/nodes/copyfuncs.c @@ -3312,6 +3312,7 @@ _copyAlterTableCmd(const AlterTableCmd *from) COPY_SCALAR_FIELD(behavior); COPY_SCALAR_FIELD(missing_ok); COPY_SCALAR_FIELD(yb_is_add_primary_key); + COPY_SCALAR_FIELD(yb_cascade); return newnode; } diff --git a/src/postgres/src/backend/parser/gram.y b/src/postgres/src/backend/parser/gram.y index 76ed6e9aec34..b2d71dcf465c 100644 --- a/src/postgres/src/backend/parser/gram.y +++ b/src/postgres/src/backend/parser/gram.y @@ -2664,6 +2664,7 @@ alter_table_cmd: AlterTableCmd *n = 
makeNode(AlterTableCmd); n->subtype = AT_SetTableSpace; n->name = $3; + n->yb_cascade = false; $$ = (Node *)n; } /* ALTER TABLE SET (...) */ diff --git a/src/postgres/src/include/nodes/parsenodes.h b/src/postgres/src/include/nodes/parsenodes.h index a34caee096d6..630978d7af4d 100644 --- a/src/postgres/src/include/nodes/parsenodes.h +++ b/src/postgres/src/include/nodes/parsenodes.h @@ -1825,6 +1825,8 @@ typedef struct AlterTableCmd /* one subcommand of an ALTER TABLE */ DropBehavior behavior; /* RESTRICT or CASCADE for DROP cases */ bool missing_ok; /* skip error if missing? */ bool yb_is_add_primary_key; /* checks if adding primary key */ + bool yb_cascade; /* to restrict movement of single table in + * colocated tablespace */ } AlterTableCmd; diff --git a/src/postgres/src/test/regress/expected/yb_colocated_tables_with_tablespaces.out b/src/postgres/src/test/regress/expected/yb_colocated_tables_with_tablespaces.out index ac5ebd9d6c7b..129193986403 100644 --- a/src/postgres/src/test/regress/expected/yb_colocated_tables_with_tablespaces.out +++ b/src/postgres/src/test/regress/expected/yb_colocated_tables_with_tablespaces.out @@ -593,6 +593,14 @@ SELECT COUNT(*) FROM pg_yb_tablegroup WHERE grpname != 'default' AND (grpname = DROP TABLE table1; DROP TABLE table2; +-- Should not be able to move a single colocated table to other tablespace +CREATE TABLE t1(a int) TABLESPACE tsp1; +CREATE TABLE t2(a int) TABLESPACE tsp1; +ALTER TABLE t1 SET TABLESPACE tsp2; +ERROR: cannot move one colocated relation alone +HINT: Use ALTER ... ALL ... CASCADE to move all colocated relations. 
+DROP TABLE t1; +DROP TABLE t2; -- A tablespace should not be dropped if any colocated tables are dependent on it CREATE TABLE t1 (a int) TABLESPACE tsp1; CREATE TABLE t2 (a int) TABLESPACE tsp1; diff --git a/src/postgres/src/test/regress/sql/yb_colocated_tables_with_tablespaces.sql b/src/postgres/src/test/regress/sql/yb_colocated_tables_with_tablespaces.sql index 15851e2e3a8d..b350047add37 100644 --- a/src/postgres/src/test/regress/sql/yb_colocated_tables_with_tablespaces.sql +++ b/src/postgres/src/test/regress/sql/yb_colocated_tables_with_tablespaces.sql @@ -292,6 +292,13 @@ SELECT COUNT(*) FROM pg_yb_tablegroup WHERE grpname != 'default' AND (grpname = DROP TABLE table1; DROP TABLE table2; +-- Should not be able to move a single colocated table to other tablespace +CREATE TABLE t1(a int) TABLESPACE tsp1; +CREATE TABLE t2(a int) TABLESPACE tsp1; +ALTER TABLE t1 SET TABLESPACE tsp2; +DROP TABLE t1; +DROP TABLE t2; + -- A tablespace should not be dropped if any colocated tables are dependent on it CREATE TABLE t1 (a int) TABLESPACE tsp1; CREATE TABLE t2 (a int) TABLESPACE tsp1; From 886a2a05c23298112e22ba8991c071484736c11c Mon Sep 17 00:00:00 2001 From: vkumar Date: Fri, 13 Sep 2024 17:52:54 +0000 Subject: [PATCH 20/75] [PLAT-15310] Fix failing UT on master for K8s service IPs Summary: Fix failing UT Test Plan: UT pipeline Reviewers: anijhawan Reviewed By: anijhawan Subscribers: yugaware Differential Revision: https://phorge.dev.yugabyte.com/D38041 --- .../test/java/com/yugabyte/yw/common/KubernetesManagerTest.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/managed/src/test/java/com/yugabyte/yw/common/KubernetesManagerTest.java b/managed/src/test/java/com/yugabyte/yw/common/KubernetesManagerTest.java index 85de284f80d4..7f3f0cafb414 100644 --- a/managed/src/test/java/com/yugabyte/yw/common/KubernetesManagerTest.java +++ b/managed/src/test/java/com/yugabyte/yw/common/KubernetesManagerTest.java @@ -150,7 +150,7 @@ public void 
getMasterServiceIPs() { "json"), command.getValue()); assertEquals( - "There must be exactly one Master or TServer endpoint service, got 0", + "There must be atleast one Master or TServer endpoint service, got 0", exception.getMessage()); } From f157e2ef8302dcfb5dada67f0cd577435ff3b517 Mon Sep 17 00:00:00 2001 From: Mark Lillibridge Date: Thu, 5 Sep 2024 13:02:22 -0700 Subject: [PATCH 21/75] [#23917] xCluster: set up sequences_data stream(s) on source universe Summary: This is part of the new sequences replication feature. Here we do the following when setting up sequences replication on the source universe: * create the sequences_data table if it does not already exist * include an outgoing stream for it for each namespace we are replicating This feature is gated by new flag, --TEST_xcluster_enable_sequence_replication for now as well as automatic mode replication being on. Note that at this point of development automatic mode cannot set up replication in the face of master restarts successfully so the relevant test has been disabled: AutoMode/XClusterOutboundReplicationGroupParameterized.MasterRestartDuringCheckpoint/0 GitHub issue to fix the above: #23918 This feature is only usable with DB-scoped replication. Fixes #23917 Jira: DB-12820 Test Plan: There are two sets of tests for outbound replication groups; I have modified both of them, parameterizing on semi versus automatic mode xCluster replication where it makes sense. The non-parameterized tests default to automatic mode off; we will want to change that once automatic mode hits production. (For the moment, automatic mode means only turn on sequence replication but Julien will shortly be adding more to it.) Most of the time the parameterization just affects how many streams/table should be in existence, but sometimes there are extra checks like was the sequences_data table created. 
In particular, the tests now check that when automatic mode is being used: * that sequences_data gets created (on the source universe) * that there is one sequence stream per namespace * that WAL retention is adjusted for those streams ``` ybd --cxx-test xcluster_outbound_replication_group-test --test-args '' >& /tmp/generic.mdl.log ybd --cxx-test xcluster_outbound_replication_group-itest --test-args '' >& /tmp/generic.mdl.log ``` Reviewers: hsunder, xCluster Reviewed By: hsunder Subscribers: jhe, ybase Differential Revision: https://phorge.dev.yugabyte.com/D37823 --- ...luster_outbound_replication_group-itest.cc | 245 +++++++++++++----- src/yb/master/catalog_manager.cc | 4 +- src/yb/master/catalog_manager.h | 4 +- src/yb/master/catalog_manager_if.h | 2 +- .../master/xcluster/master_xcluster_util.cc | 10 +- src/yb/master/xcluster/master_xcluster_util.h | 3 +- ...er_inbound_replication_group_setup_task.cc | 8 +- src/yb/master/xcluster/xcluster_manager.cc | 3 + ...cluster_outbound_replication_group-test.cc | 178 +++++++++---- .../xcluster_outbound_replication_group.cc | 41 ++- .../xcluster_outbound_replication_group.h | 19 +- ...luster_outbound_replication_group_tasks.cc | 17 ++ ...cluster_outbound_replication_group_tasks.h | 1 + .../xcluster/xcluster_source_manager.cc | 26 +- 14 files changed, 421 insertions(+), 140 deletions(-) diff --git a/src/yb/integration-tests/xcluster/xcluster_outbound_replication_group-itest.cc b/src/yb/integration-tests/xcluster/xcluster_outbound_replication_group-itest.cc index 6e4f0a3cba91..ae83d8e006fe 100644 --- a/src/yb/integration-tests/xcluster/xcluster_outbound_replication_group-itest.cc +++ b/src/yb/integration-tests/xcluster/xcluster_outbound_replication_group-itest.cc @@ -11,6 +11,9 @@ // under the License. 
// +#include +#include + #include "yb/client/xcluster_client.h" #include "yb/client/yb_table_name.h" #include "yb/consensus/log.h" @@ -21,10 +24,13 @@ #include "yb/tablet/tablet_peer.h" #include "yb/util/backoff_waiter.h" +DECLARE_int32(update_min_cdc_indices_interval_secs); DECLARE_uint32(cdc_wal_retention_time_secs); DECLARE_uint32(max_xcluster_streams_to_checkpoint_in_parallel); DECLARE_bool(TEST_block_xcluster_checkpoint_namespace_task); -DECLARE_int32(update_min_cdc_indices_interval_secs); +DECLARE_bool(TEST_xcluster_enable_ddl_replication); +DECLARE_bool(TEST_xcluster_enable_sequence_replication); + namespace yb { namespace master { @@ -38,6 +44,11 @@ class XClusterOutboundReplicationGroupTest : public XClusterYsqlTestBase { public: XClusterOutboundReplicationGroupTest() {} void SetUp() override { + ANNOTATE_UNPROTECTED_WRITE(FLAGS_TEST_xcluster_enable_sequence_replication) = + UseAutomaticMode(); + ANNOTATE_UNPROTECTED_WRITE(FLAGS_TEST_xcluster_enable_ddl_replication) = + UseAutomaticMode(); + XClusterYsqlTestBase::SetUp(); MiniClusterOptions opts; opts.num_tablet_servers = 1; @@ -52,6 +63,21 @@ class XClusterOutboundReplicationGroupTest : public XClusterYsqlTestBase { namespace_id_ = ASSERT_RESULT(CreateYsqlNamespace(kNamespaceName)); } + virtual bool UseAutomaticMode() { + // Except for parameterized tests, we currently default to semi-automatic mode. + return false; + } + + // How many extra streams/tables a namespace has + int OverheadStreamsCount() { + if (!UseAutomaticMode()) { + return 0; + } + // So far automatic mode has one extra stream for each namespace: sequences_data. 
+ // TODO(jhe): increment this when you add the DDL queue table + return 1; + } + Result CreateYsqlNamespace(const NamespaceName& ns_name) { CreateNamespaceResponsePB resp; RETURN_NOT_OK(CreateDatabase(&producer_cluster_, ns_name)); @@ -79,28 +105,37 @@ class XClusterOutboundReplicationGroupTest : public XClusterYsqlTestBase { void VerifyNamespaceCheckpointInfo( const TableId& table_id1, const TableId& table_id2, size_t all_xcluster_streams_count, - const master::GetXClusterStreamsResponsePB& resp, bool skip_schema_name_check = false) { + const master::GetXClusterStreamsResponsePB& resp, bool sequences_data_included, + bool skip_schema_name_check = false) { ASSERT_FALSE(resp.initial_bootstrap_required()); - ASSERT_EQ(resp.table_infos_size(), 2); + ASSERT_EQ(resp.table_infos_size(), sequences_data_included ? 3 : 2); auto all_xcluster_streams = CleanupAndGetAllXClusterStreams(); ASSERT_EQ(all_xcluster_streams.size(), all_xcluster_streams_count); std::set table_ids; for (const auto& table_info : resp.table_infos()) { + SCOPED_TRACE("table name: " + table_info.table_name()); if (table_info.table_name() == kTableName1) { ASSERT_EQ(table_info.table_id(), table_id1); } else if (table_info.table_name() == kTableName2) { ASSERT_EQ(table_info.table_id(), table_id2); + } else if (table_info.table_name() == "sequences_data") { + ASSERT_EQ(table_info.table_id(), kPgSequencesDataTableId); } else { FAIL() << "Unexpected table name: " << table_info.table_name(); } - if (skip_schema_name_check) { - // Make sure it is not empty. - ASSERT_FALSE(table_info.pg_schema_name().empty()); + if (table_info.table_id() != kPgSequencesDataTableId) { + if (skip_schema_name_check) { + // Make sure it is not empty. 
+ ASSERT_FALSE(table_info.pg_schema_name().empty()); + } else { + ASSERT_EQ(table_info.pg_schema_name(), kPgSchemaName); + } } else { - ASSERT_EQ(table_info.pg_schema_name(), kPgSchemaName); + EXPECT_TRUE(table_info.pg_schema_name().empty()); } + ASSERT_FALSE(table_info.xrepl_stream_id().empty()); auto stream_id = ASSERT_RESULT(xrepl::StreamId::FromString(table_info.xrepl_stream_id())); ASSERT_TRUE(all_xcluster_streams.contains(stream_id)); @@ -175,7 +210,18 @@ class XClusterOutboundReplicationGroupTest : public XClusterYsqlTestBase { NamespaceId namespace_id_; }; -TEST_F(XClusterOutboundReplicationGroupTest, TestMultipleTable) { +class XClusterOutboundReplicationGroupParameterized : public XClusterOutboundReplicationGroupTest, + public ::testing::WithParamInterface { + public: + bool UseAutomaticMode() override { return GetParam(); } +}; + +INSTANTIATE_TEST_CASE_P( + AutoMode, XClusterOutboundReplicationGroupParameterized, ::testing::Values(true)); +INSTANTIATE_TEST_CASE_P( + SemiMode, XClusterOutboundReplicationGroupParameterized, ::testing::Values(false)); + +TEST_P(XClusterOutboundReplicationGroupParameterized, TestMultipleTable) { ANNOTATE_UNPROTECTED_WRITE(FLAGS_max_xcluster_streams_to_checkpoint_in_parallel) = 1; ANNOTATE_UNPROTECTED_WRITE(FLAGS_update_min_cdc_indices_interval_secs) = 5; @@ -186,21 +232,32 @@ TEST_F(XClusterOutboundReplicationGroupTest, TestMultipleTable) { ASSERT_NOK(GetXClusterStreams(kReplicationGroupId, namespace_id_)); - ASSERT_OK(XClusterClient().CreateOutboundReplicationGroup(kReplicationGroupId, {namespace_id_})); + ASSERT_OK(XClusterClient().CreateOutboundReplicationGroup( + kReplicationGroupId, {namespace_id_}, UseAutomaticMode())); auto resp = ASSERT_RESULT(GetXClusterStreams(kReplicationGroupId, namespace_id_)); - // We should have 2 streams now. - size_t stream_count = 2; + if (UseAutomaticMode()) { + // In automatic mode, sequences_data should have been created. 
+ ASSERT_TRUE(catalog_manager_->GetTableInfo(kPgSequencesDataTableId)); + } + + // We should have 2 normal streams now. + size_t stream_count = 2 + OverheadStreamsCount(); ASSERT_NO_FATALS(VerifyNamespaceCheckpointInfo( - table_id_1, table_id_2, stream_count, resp, /*skip_schema_name_check=*/true)); + table_id_1, table_id_2, stream_count, resp, /*sequences_data_included=*/UseAutomaticMode(), + /*skip_schema_name_check=*/true)); for (const auto& table_info : resp.table_infos()) { // Order is not deterministic so search with the table name. if (table_info.table_name() == kTableName1) { ASSERT_EQ(table_info.pg_schema_name(), kPgSchemaName); - } else { + } else if (table_info.table_name() == kTableName2) { ASSERT_EQ(table_info.pg_schema_name(), pg_schema_name2); + } else if (table_info.table_name() == "sequences_data") { + EXPECT_EQ(table_info.pg_schema_name(), ""); + } else { + FAIL() << "unknown tablename " << table_info.table_name(); } } @@ -209,7 +266,8 @@ TEST_F(XClusterOutboundReplicationGroupTest, TestMultipleTable) { kReplicationGroupId, namespace_id_, {kTableName2, kTableName1}, {pg_schema_name2, kPgSchemaName})); ASSERT_NO_FATALS(VerifyNamespaceCheckpointInfo( - table_id_1, table_id_2, stream_count, resp, /*skip_schema_name_check=*/true)); + table_id_1, table_id_2, stream_count, resp, /*sequences_data_included=*/false, + /*skip_schema_name_check=*/true)); ASSERT_EQ(resp.table_infos(0).pg_schema_name(), pg_schema_name2); ASSERT_EQ(resp.table_infos(1).pg_schema_name(), kPgSchemaName); ASSERT_EQ(resp.table_infos(0).table_name(), kTableName2); @@ -217,6 +275,9 @@ TEST_F(XClusterOutboundReplicationGroupTest, TestMultipleTable) { ASSERT_OK(VerifyWalRetentionOfTable(table_id_1)); ASSERT_OK(VerifyWalRetentionOfTable(table_id_2)); + if (UseAutomaticMode()) { + ASSERT_OK(VerifyWalRetentionOfTable(kPgSequencesDataTableId)); + } ASSERT_OK(XClusterClient().DeleteOutboundReplicationGroup( kReplicationGroupId, /*target_master_addresses=*/{})); @@ -227,7 +288,7 @@ 
TEST_F(XClusterOutboundReplicationGroupTest, TestMultipleTable) { ASSERT_TRUE(all_xcluster_streams.empty()); } -TEST_F(XClusterOutboundReplicationGroupTest, AddDeleteNamespaces) { +TEST_P(XClusterOutboundReplicationGroupParameterized, AddDeleteNamespaces) { auto ns1_table_id_1 = ASSERT_RESULT(CreateYsqlTable(kNamespaceName, kTableName1)); auto ns1_table_id_2 = ASSERT_RESULT(CreateYsqlTable(kNamespaceName, kTableName2)); @@ -236,15 +297,16 @@ TEST_F(XClusterOutboundReplicationGroupTest, AddDeleteNamespaces) { auto ns2_table_id_1 = ASSERT_RESULT(CreateYsqlTable(namespace_name_2, kTableName1)); auto ns2_table_id_2 = ASSERT_RESULT(CreateYsqlTable(namespace_name_2, kTableName2)); - ASSERT_OK(XClusterClient().CreateOutboundReplicationGroup(kReplicationGroupId, {namespace_id_})); + ASSERT_OK(XClusterClient().CreateOutboundReplicationGroup( + kReplicationGroupId, {namespace_id_}, UseAutomaticMode())); // Wait for the new streams to be ready. auto ns1_info = ASSERT_RESULT(GetXClusterStreams(kReplicationGroupId, namespace_id_)); - // We should have 2 streams now. - size_t stream_count = 2; + // We should have 2 normal streams now. + size_t stream_count = 2 + OverheadStreamsCount(); auto all_xcluster_streams_initial = CleanupAndGetAllXClusterStreams(); - ASSERT_EQ(all_xcluster_streams_initial.size(), 2); + ASSERT_EQ(all_xcluster_streams_initial.size(), 2 + OverheadStreamsCount()); // Make sure invalid namespace id is handled correctly. ASSERT_NOK(GetXClusterStreams(kReplicationGroupId, "BadId")); @@ -252,15 +314,16 @@ TEST_F(XClusterOutboundReplicationGroupTest, AddDeleteNamespaces) { // Make sure only the namespace that was added is returned. 
ASSERT_NOK(GetXClusterStreams(kReplicationGroupId, namespace_id_2)); - ASSERT_NO_FATALS( - VerifyNamespaceCheckpointInfo(ns1_table_id_1, ns1_table_id_2, stream_count, ns1_info)); + ASSERT_NO_FATALS(VerifyNamespaceCheckpointInfo( + ns1_table_id_1, ns1_table_id_2, stream_count, ns1_info, + /*sequences_data_included=*/UseAutomaticMode())); // Add the second namespace. ASSERT_OK(client::XClusterClient(*client_).AddNamespaceToOutboundReplicationGroup( kReplicationGroupId, namespace_id_2)); - // We should have 4 streams now. - stream_count = 4; + // We should have 4 normal streams now. + stream_count = 4 + 2 * OverheadStreamsCount(); // The info of the first namespace should not change. auto ns1_info_dup = ASSERT_RESULT(GetXClusterStreams(kReplicationGroupId, namespace_id_)); @@ -268,8 +331,9 @@ TEST_F(XClusterOutboundReplicationGroupTest, AddDeleteNamespaces) { // Validate the seconds namespace. auto ns2_info = ASSERT_RESULT(GetXClusterStreams(kReplicationGroupId, namespace_id_2)); - ASSERT_NO_FATALS( - VerifyNamespaceCheckpointInfo(ns2_table_id_1, ns2_table_id_2, stream_count, ns2_info)); + ASSERT_NO_FATALS(VerifyNamespaceCheckpointInfo( + ns2_table_id_1, ns2_table_id_2, stream_count, ns2_info, + /*sequences_data_included=*/UseAutomaticMode())); ASSERT_OK(XClusterClient().RemoveNamespaceFromOutboundReplicationGroup( kReplicationGroupId, namespace_id_, /*target_master_addresses=*/{})); @@ -278,7 +342,7 @@ TEST_F(XClusterOutboundReplicationGroupTest, AddDeleteNamespaces) { // We should only have only the streams from second namespace. { auto new_xcluster_streams = CleanupAndGetAllXClusterStreams(); - ASSERT_EQ(new_xcluster_streams.size(), 2); + ASSERT_EQ(new_xcluster_streams.size(), 2 + OverheadStreamsCount()); // new_xcluster_streams and all_xcluster_streams should not overlap. 
for (const auto& stream : new_xcluster_streams) { @@ -293,19 +357,20 @@ TEST_F(XClusterOutboundReplicationGroupTest, AddDeleteNamespaces) { ASSERT_TRUE(final_xcluster_streams.empty()); } -TEST_F(XClusterOutboundReplicationGroupTest, AddTable) { +TEST_P(XClusterOutboundReplicationGroupParameterized, AddTable) { ANNOTATE_UNPROTECTED_WRITE(FLAGS_update_min_cdc_indices_interval_secs) = 5; auto table_id_1 = ASSERT_RESULT(CreateYsqlTable(kNamespaceName, kTableName1)); ASSERT_OK(VerifyWalRetentionOfTable(table_id_1, 900)); - ASSERT_OK(XClusterClient().CreateOutboundReplicationGroup(kReplicationGroupId, {namespace_id_})); + ASSERT_OK(XClusterClient().CreateOutboundReplicationGroup( + kReplicationGroupId, {namespace_id_}, UseAutomaticMode())); // Wait for the new streams to be ready. ASSERT_OK(GetXClusterStreams(kReplicationGroupId, namespace_id_)); auto all_xcluster_streams_initial = CleanupAndGetAllXClusterStreams(); - ASSERT_EQ(all_xcluster_streams_initial.size(), 1); + ASSERT_EQ(all_xcluster_streams_initial.size(), 1 + OverheadStreamsCount()); ASSERT_OK(VerifyWalRetentionOfTable(table_id_1)); @@ -313,8 +378,10 @@ TEST_F(XClusterOutboundReplicationGroupTest, AddTable) { auto ns1_info = ASSERT_RESULT(GetXClusterStreams(kReplicationGroupId, namespace_id_)); - size_t stream_count = 2; - ASSERT_NO_FATALS(VerifyNamespaceCheckpointInfo(table_id_1, table_id_2, stream_count, ns1_info)); + size_t stream_count = 2 + OverheadStreamsCount(); + ASSERT_NO_FATALS(VerifyNamespaceCheckpointInfo( + table_id_1, table_id_2, stream_count, ns1_info, + /*sequences_data_included=*/UseAutomaticMode())); ASSERT_OK(VerifyWalRetentionOfTable(table_id_2)); } @@ -323,7 +390,8 @@ TEST_F(XClusterOutboundReplicationGroupTest, IsBootstrapRequiredEmptyTable) { ANNOTATE_UNPROTECTED_WRITE(FLAGS_max_xcluster_streams_to_checkpoint_in_parallel) = 1; auto table_id_1 = ASSERT_RESULT(CreateYsqlTable(kNamespaceName, kTableName1)); - ASSERT_OK(XClusterClient().CreateOutboundReplicationGroup(kReplicationGroupId, 
{namespace_id_})); + ASSERT_OK(XClusterClient().CreateOutboundReplicationGroup( + kReplicationGroupId, {namespace_id_}, UseAutomaticMode())); std::promise> promise; @@ -344,7 +412,8 @@ TEST_F(XClusterOutboundReplicationGroupTest, IsBootstrapRequiredTableWithData) { ASSERT_OK(producer_client()->OpenTable(table_id_2, &table_2)); ASSERT_OK(InsertRowsInProducer(0, 10, table_2)); - ASSERT_OK(XClusterClient().CreateOutboundReplicationGroup(kReplicationGroupId, {namespace_id_})); + ASSERT_OK(XClusterClient().CreateOutboundReplicationGroup( + kReplicationGroupId, {namespace_id_}, UseAutomaticMode())); std::promise> promise; @@ -366,7 +435,8 @@ TEST_F(XClusterOutboundReplicationGroupTest, IsBootstrapRequiredTableWithDeleted ASSERT_OK(InsertRowsInProducer(0, 10, table_2)); ASSERT_OK(DeleteRowsInProducer(0, 10, table_2)); - ASSERT_OK(XClusterClient().CreateOutboundReplicationGroup(kReplicationGroupId, {namespace_id_})); + ASSERT_OK(XClusterClient().CreateOutboundReplicationGroup( + kReplicationGroupId, {namespace_id_}, UseAutomaticMode())); std::promise> promise; @@ -378,14 +448,22 @@ TEST_F(XClusterOutboundReplicationGroupTest, IsBootstrapRequiredTableWithDeleted ASSERT_FALSE(is_bootstrap_required); } -TEST_F(XClusterOutboundReplicationGroupTest, MasterRestartDuringCheckpoint) { +TEST_P(XClusterOutboundReplicationGroupParameterized, MasterRestartDuringCheckpoint) { + // Temporarily disabling this during automatic mode because automatic mode does not yet + // successfully survive a master restart during a checkpoint. + // TODO(GitHub issue #23918): fix automatic mode so it passes this test then reenable this test. 
+ if (UseAutomaticMode()) { + return; + } + ANNOTATE_UNPROTECTED_WRITE(FLAGS_max_xcluster_streams_to_checkpoint_in_parallel) = 1; auto table_id_1 = ASSERT_RESULT(CreateYsqlTable(kNamespaceName, kTableName1)); auto table_id_2 = ASSERT_RESULT(CreateYsqlTable(kNamespaceName, kTableName2)); ANNOTATE_UNPROTECTED_WRITE(FLAGS_TEST_block_xcluster_checkpoint_namespace_task) = true; - ASSERT_OK(XClusterClient().CreateOutboundReplicationGroup(kReplicationGroupId, {namespace_id_})); + ASSERT_OK(XClusterClient().CreateOutboundReplicationGroup( + kReplicationGroupId, {namespace_id_}, UseAutomaticMode())); std::promise> promise; auto future = promise.get_future(); @@ -395,26 +473,29 @@ TEST_F(XClusterOutboundReplicationGroupTest, MasterRestartDuringCheckpoint) { ASSERT_EQ(future.wait_for(5s), std::future_status::timeout); + LOG(INFO) << "***** Restarting Master now..."; ASSERT_OK(RestartMaster()); ANNOTATE_UNPROTECTED_WRITE(FLAGS_TEST_block_xcluster_checkpoint_namespace_task) = false; auto resp = ASSERT_RESULT(future.get()); - size_t stream_count = 2; - ASSERT_NO_FATALS(VerifyNamespaceCheckpointInfo(table_id_1, table_id_2, stream_count, resp)); + size_t stream_count = 2 + OverheadStreamsCount(); + ASSERT_NO_FATALS(VerifyNamespaceCheckpointInfo( + table_id_1, table_id_2, stream_count, resp, /*sequences_data_included=*/UseAutomaticMode())); auto all_xcluster_streams_initial = CleanupAndGetAllXClusterStreams(); ASSERT_EQ(all_xcluster_streams_initial.size(), stream_count); } -TEST_F(XClusterOutboundReplicationGroupTest, Repair) { +TEST_P(XClusterOutboundReplicationGroupParameterized, Repair) { auto table_id_1 = ASSERT_RESULT(CreateYsqlTable(kNamespaceName, kTableName1)); auto table_id_2 = ASSERT_RESULT(CreateYsqlTable(kNamespaceName, kTableName2)); - ASSERT_OK(XClusterClient().CreateOutboundReplicationGroup(kReplicationGroupId, {namespace_id_})); + ASSERT_OK(XClusterClient().CreateOutboundReplicationGroup( + kReplicationGroupId, {namespace_id_}, UseAutomaticMode())); auto resp = 
ASSERT_RESULT(GetXClusterStreams(kReplicationGroupId, namespace_id_)); - ASSERT_EQ(resp.table_infos_size(), 2); + ASSERT_EQ(resp.table_infos_size(), 2 + OverheadStreamsCount()); ASSERT_NOK_STR_CONTAINS( XClusterClient().RepairOutboundXClusterReplicationGroupRemoveTable( @@ -435,10 +516,16 @@ TEST_F(XClusterOutboundReplicationGroupTest, Repair) { "not found in xClusterOutboundReplicationGroup"); resp = ASSERT_RESULT(GetXClusterStreams(kReplicationGroupId, namespace_id_)); - ASSERT_EQ(resp.table_infos_size(), 1); - ASSERT_EQ(resp.table_infos(0).table_id(), table_id_2); + ASSERT_EQ(resp.table_infos_size(), 1 + OverheadStreamsCount()); + int found = -1; + for (int i = 0; i < resp.table_infos_size(); i++) { + if (resp.table_infos(i).table_id() == table_id_2) { + found = i; + } + } + ASSERT_NE(found, -1); const auto table2_stream_id = - ASSERT_RESULT(xrepl::StreamId::FromString(resp.table_infos(0).xrepl_stream_id())); + ASSERT_RESULT(xrepl::StreamId::FromString(resp.table_infos(found).xrepl_stream_id())); ASSERT_NOK_STR_CONTAINS( GetXClusterStreams(kReplicationGroupId, namespace_id_, {kTableName1}, {kPgSchemaName}), @@ -478,33 +565,46 @@ TEST_F(XClusterOutboundReplicationGroupTest, Repair) { "already exists in"); resp = ASSERT_RESULT(GetXClusterStreams(kReplicationGroupId, namespace_id_)); - ASSERT_EQ(resp.table_infos_size(), 2); - ASSERT_EQ(resp.table_infos(0).table_id(), table_id_2); + ASSERT_EQ(resp.table_infos_size(), 2 + OverheadStreamsCount()); + found = -1; + for (int i = 0; i < resp.table_infos_size(); i++) { + if (resp.table_infos(i).table_id() == table_id_2) { + found = i; + } + } + ASSERT_NE(found, -1); for (const auto& table_info : resp.table_infos()) { - auto stream_id_str = new_stream_id.ToString(); - if (table_info.table_id() == table_id_2) { - stream_id_str = table2_stream_id.ToString(); + if (table_info.table_id() == table_id_1) { + auto stream_id_str = new_stream_id.ToString(); + ASSERT_EQ(table_info.xrepl_stream_id(), stream_id_str); + } else if 
(table_info.table_id() == table_id_2) { + auto stream_id_str = table2_stream_id.ToString(); + ASSERT_EQ(table_info.xrepl_stream_id(), stream_id_str); } - ASSERT_EQ(table_info.xrepl_stream_id(), stream_id_str); } } -TEST_F(XClusterOutboundReplicationGroupTest, RepairWithYbAdmin) { +TEST_P(XClusterOutboundReplicationGroupParameterized, RepairWithYbAdmin) { auto table_id_1 = ASSERT_RESULT(CreateYsqlTable(kNamespaceName, kTableName1)); auto table_id_2 = ASSERT_RESULT(CreateYsqlTable(kNamespaceName, kTableName2)); - ASSERT_OK(XClusterClient().CreateOutboundReplicationGroup(kReplicationGroupId, {namespace_id_})); + ASSERT_OK(XClusterClient().CreateOutboundReplicationGroup( + kReplicationGroupId, {namespace_id_}, UseAutomaticMode())); auto resp = ASSERT_RESULT(GetXClusterStreams(kReplicationGroupId, namespace_id_)); - ASSERT_EQ(resp.table_infos_size(), 2); + ASSERT_EQ(resp.table_infos_size(), 2 + OverheadStreamsCount()); ASSERT_OK(CallAdmin( producer_cluster(), "repair_xcluster_outbound_replication_remove_table", kReplicationGroupId, table_id_1)); resp = ASSERT_RESULT(GetXClusterStreams(kReplicationGroupId, namespace_id_)); - ASSERT_EQ(resp.table_infos_size(), 1); - ASSERT_EQ(resp.table_infos(0).table_id(), table_id_2); + ASSERT_EQ(resp.table_infos_size(), 1 + OverheadStreamsCount()); + std::vector table_ids; + for (const auto& table_info : resp.table_infos()) { + table_ids.push_back(table_info.table_id()); + } + EXPECT_THAT(table_ids, testing::Contains(table_id_2)); const auto new_stream_ids = ASSERT_RESULT(BootstrapProducer(producer_cluster(), client_, {table_id_1})); @@ -516,12 +616,12 @@ TEST_F(XClusterOutboundReplicationGroupTest, RepairWithYbAdmin) { table_id_1, new_stream_id.ToString())); resp = ASSERT_RESULT(GetXClusterStreams(kReplicationGroupId, namespace_id_)); - ASSERT_EQ(resp.table_infos_size(), 2); + ASSERT_EQ(resp.table_infos_size(), 2 + OverheadStreamsCount()); } // Validate the GetXClusterOutboundReplicationGroupInfo, and // 
GetXClusterOutboundReplicationGroups RPCs. -TEST_F(XClusterOutboundReplicationGroupTest, TestListAPIs) { +TEST_P(XClusterOutboundReplicationGroupParameterized, TestListAPIs) { // Create two DBs with different table counts. ASSERT_OK(CreateYsqlTable(kNamespaceName, kTableName1)); @@ -532,7 +632,7 @@ TEST_F(XClusterOutboundReplicationGroupTest, TestListAPIs) { // Replication group 1 with two namespaces. ASSERT_OK(XClusterClient().CreateOutboundReplicationGroup( - kReplicationGroupId, {namespace_id_, namespace_id_2})); + kReplicationGroupId, {namespace_id_, namespace_id_2}, UseAutomaticMode())); // Wait for checkpointing to complete. ASSERT_OK(GetXClusterStreams(kReplicationGroupId, namespace_id_)); ASSERT_OK(GetXClusterStreams(kReplicationGroupId, namespace_id_2)); @@ -542,13 +642,14 @@ TEST_F(XClusterOutboundReplicationGroupTest, TestListAPIs) { ASSERT_EQ(group_info.size(), 2); ASSERT_TRUE(group_info.contains(namespace_id_)); ASSERT_TRUE(group_info.contains(namespace_id_2)); - ASSERT_EQ(group_info[namespace_id_].size(), 1); - ASSERT_EQ(group_info[namespace_id_2].size(), 2); + ASSERT_EQ(group_info[namespace_id_].size(), 1 + OverheadStreamsCount()); + ASSERT_EQ(group_info[namespace_id_2].size(), 2 + OverheadStreamsCount()); } // Replication group 2 with one namespace. const xcluster::ReplicationGroupId replication_group2("rg2"); - ASSERT_OK(XClusterClient().CreateOutboundReplicationGroup(replication_group2, {namespace_id_})); + ASSERT_OK(XClusterClient().CreateOutboundReplicationGroup( + replication_group2, {namespace_id_}, UseAutomaticMode())); ASSERT_OK(GetXClusterStreams(replication_group2, namespace_id_)); // Wait for checkpointing to complete. 
{ @@ -556,7 +657,7 @@ TEST_F(XClusterOutboundReplicationGroupTest, TestListAPIs) { ASSERT_RESULT(XClusterClient().GetXClusterOutboundReplicationGroupInfo(replication_group2)); ASSERT_EQ(group_info.size(), 1); ASSERT_TRUE(group_info.contains(namespace_id_)); - ASSERT_EQ(group_info[namespace_id_].size(), 1); + ASSERT_EQ(group_info[namespace_id_].size(), 1 + OverheadStreamsCount()); } // List groups for a namespace without any replication groups. @@ -618,10 +719,11 @@ TEST_F(XClusterOutboundReplicationGroupTest, TestListAPIs) { } // Make sure we cleanup the streams of the failed table create. -TEST_F(XClusterOutboundReplicationGroupTest, CleanupStreamsOfFailedTableCreate) { +TEST_P(XClusterOutboundReplicationGroupParameterized, CleanupStreamsOfFailedTableCreate) { auto table_id_1 = ASSERT_RESULT(CreateYsqlTable(kNamespaceName, kTableName1)); - ASSERT_OK(XClusterClient().CreateOutboundReplicationGroup(kReplicationGroupId, {namespace_id_})); - int expected_stream_count = 1; + ASSERT_OK(XClusterClient().CreateOutboundReplicationGroup( + kReplicationGroupId, {namespace_id_}, UseAutomaticMode())); + int expected_stream_count = 1 + OverheadStreamsCount(); auto check_streams = [&]() -> Status { auto resp = VERIFY_RESULT(GetXClusterStreams(kReplicationGroupId, namespace_id_)); @@ -656,10 +758,11 @@ TEST_F(XClusterOutboundReplicationGroupTest, CleanupStreamsOfFailedTableCreate) ASSERT_OK(check_streams()); } -TEST_F(XClusterOutboundReplicationGroupTest, TestGetStreamByTableId) { +TEST_P(XClusterOutboundReplicationGroupParameterized, TestGetStreamByTableId) { auto table_id_1 = ASSERT_RESULT(CreateYsqlTable(kNamespaceName, kTableName1)); - ASSERT_OK(XClusterClient().CreateOutboundReplicationGroup(kReplicationGroupId, {namespace_id_})); + ASSERT_OK(XClusterClient().CreateOutboundReplicationGroup( + kReplicationGroupId, {namespace_id_}, UseAutomaticMode())); // Delete the table to put it into HIDDEN state. 
ASSERT_OK(DropYsqlTable(&producer_cluster_, kNamespaceName, kPgSchemaName, kTableName1)); @@ -683,6 +786,14 @@ TEST_F(XClusterOutboundReplicationGroupTest, TestGetStreamByTableId) { ASSERT_EQ(ns_info.table_infos(0).table_id(), table_id_1); ASSERT_EQ(ns_info.table_infos(1).table_id(), table_id_2); + if (UseAutomaticMode()) { + // Verify that we can request sequences_data by its table id. + auto ns_info = ASSERT_RESULT( + GetXClusterStreamsByTableId(kReplicationGroupId, namespace_id_, {kPgSequencesDataTableId})); + ASSERT_EQ(ns_info.table_infos_size(), 1); + ASSERT_EQ(ns_info.table_infos(0).table_id(), kPgSequencesDataTableId); + } + // Verify that we can request a table that does not exist. ASSERT_NOK_STR_CONTAINS( GetXClusterStreamsByTableId(kReplicationGroupId, namespace_id_, {"bad_table_id"}), diff --git a/src/yb/master/catalog_manager.cc b/src/yb/master/catalog_manager.cc index a6676c307eca..9408d8eb8784 100644 --- a/src/yb/master/catalog_manager.cc +++ b/src/yb/master/catalog_manager.cc @@ -7864,11 +7864,11 @@ Status CatalogManager::ListTables(const ListTablesRequestPB* req, return Status::OK(); } -scoped_refptr CatalogManager::GetTableInfoUnlocked(const TableId& table_id) { +scoped_refptr CatalogManager::GetTableInfoUnlocked(const TableId& table_id) const { return tables_->FindTableOrNull(table_id); } -scoped_refptr CatalogManager::GetTableInfo(const TableId& table_id) { +scoped_refptr CatalogManager::GetTableInfo(const TableId& table_id) const { SharedLock lock(mutex_); return GetTableInfoUnlocked(table_id); } diff --git a/src/yb/master/catalog_manager.h b/src/yb/master/catalog_manager.h index fc765a463e3e..bc5ff814f6d8 100644 --- a/src/yb/master/catalog_manager.h +++ b/src/yb/master/catalog_manager.h @@ -787,8 +787,8 @@ class CatalogManager : public tserver::TabletPeerLookupIf, bool IsLoadBalancerEnabled() override; // Return the table info for the table with the specified UUID, if it exists. 
- TableInfoPtr GetTableInfo(const TableId& table_id) EXCLUDES(mutex_) override; - TableInfoPtr GetTableInfoUnlocked(const TableId& table_id) REQUIRES_SHARED(mutex_); + TableInfoPtr GetTableInfo(const TableId& table_id) const EXCLUDES(mutex_) override; + TableInfoPtr GetTableInfoUnlocked(const TableId& table_id) const REQUIRES_SHARED(mutex_); // Gets the table info for each table id, or sets it to null if the table id was not found. std::unordered_map GetTableInfos(const std::vector& table_ids) diff --git a/src/yb/master/catalog_manager_if.h b/src/yb/master/catalog_manager_if.h index 827745974873..baf26b1b1d57 100644 --- a/src/yb/master/catalog_manager_if.h +++ b/src/yb/master/catalog_manager_if.h @@ -147,7 +147,7 @@ class CatalogManagerIf { virtual bool IsUserIndex(const TableInfo& table) const = 0; - virtual TableInfoPtr GetTableInfo(const TableId& table_id) = 0; + virtual TableInfoPtr GetTableInfo(const TableId& table_id) const = 0; virtual Result GetTableReplicationInfo( const ReplicationInfoPB& table_replication_info, diff --git a/src/yb/master/xcluster/master_xcluster_util.cc b/src/yb/master/xcluster/master_xcluster_util.cc index fcca63276392..1affc1c6cf52 100644 --- a/src/yb/master/xcluster/master_xcluster_util.cc +++ b/src/yb/master/xcluster/master_xcluster_util.cc @@ -65,11 +65,19 @@ std::string GetFullTableName(const TableInfo& table_info) { } Result> GetTablesEligibleForXClusterReplication( - const CatalogManager& catalog_manager, const NamespaceId& namespace_id) { + const CatalogManager& catalog_manager, const NamespaceId& namespace_id, + bool include_sequences_data) { auto table_infos = VERIFY_RESULT(catalog_manager.GetTableInfosForNamespace(namespace_id)); EraseIf( [](const TableInfoPtr& table) { return !IsTableEligibleForXClusterReplication(*table); }, &table_infos); + + if (include_sequences_data) { + auto sequence_table_info = catalog_manager.GetTableInfo(kPgSequencesDataTableId); + if (sequence_table_info) { + 
table_infos.push_back(std::move(sequence_table_info)); + } + } return table_infos; } diff --git a/src/yb/master/xcluster/master_xcluster_util.h b/src/yb/master/xcluster/master_xcluster_util.h index 0a6be86669dc..1265db8b9a27 100644 --- a/src/yb/master/xcluster/master_xcluster_util.h +++ b/src/yb/master/xcluster/master_xcluster_util.h @@ -28,7 +28,8 @@ bool IsTableEligibleForXClusterReplication(const master::TableInfo& table); std::string GetFullTableName(const TableInfo& table_info); Result> GetTablesEligibleForXClusterReplication( - const CatalogManager& catalog_manager, const NamespaceId& namespace_id); + const CatalogManager& catalog_manager, const NamespaceId& namespace_id, + bool include_sequences_data); bool IsDbScoped(const SysUniverseReplicationEntryPB& replication_info); diff --git a/src/yb/master/xcluster/xcluster_inbound_replication_group_setup_task.cc b/src/yb/master/xcluster/xcluster_inbound_replication_group_setup_task.cc index 5c1bab9dbd4d..19b8ed3897db 100644 --- a/src/yb/master/xcluster/xcluster_inbound_replication_group_setup_task.cc +++ b/src/yb/master/xcluster/xcluster_inbound_replication_group_setup_task.cc @@ -54,6 +54,8 @@ DEFINE_test_flag(bool, exit_unfinished_merging, false, DECLARE_bool(enable_xcluster_auto_flag_validation); +DECLARE_bool(TEST_xcluster_enable_sequence_replication); + using namespace std::placeholders; namespace yb::master { @@ -550,8 +552,10 @@ Status XClusterInboundReplicationGroupSetupTask::ValidateTableListForDbScoped() std::set validated_tables; for (const auto& namespace_id : target_namespace_ids_) { - auto table_infos = - VERIFY_RESULT(GetTablesEligibleForXClusterReplication(catalog_manager_, namespace_id)); + auto table_infos = VERIFY_RESULT(GetTablesEligibleForXClusterReplication( + catalog_manager_, namespace_id, + /*include_sequences_data=*/ + (automatic_ddl_mode_ && FLAGS_TEST_xcluster_enable_sequence_replication))); std::vector missing_tables; diff --git a/src/yb/master/xcluster/xcluster_manager.cc 
b/src/yb/master/xcluster/xcluster_manager.cc index e01ad15d84d2..7acde37ac866 100644 --- a/src/yb/master/xcluster/xcluster_manager.cc +++ b/src/yb/master/xcluster/xcluster_manager.cc @@ -46,6 +46,9 @@ DEFINE_RUNTIME_AUTO_bool(enable_tablet_split_of_xcluster_replicated_tables, kExt DEFINE_test_flag(bool, xcluster_enable_ddl_replication, false, "Enables xCluster automatic DDL replication."); +DEFINE_test_flag(bool, xcluster_enable_sequence_replication, false, + "Enables xCluster automatic replication of sequences."); + #define LOG_FUNC_AND_RPC \ LOG_WITH_FUNC(INFO) << req->ShortDebugString() << ", from: " << RequestorString(rpc) diff --git a/src/yb/master/xcluster/xcluster_outbound_replication_group-test.cc b/src/yb/master/xcluster/xcluster_outbound_replication_group-test.cc index 74d5154ef95b..2e734c6dcba9 100644 --- a/src/yb/master/xcluster/xcluster_outbound_replication_group-test.cc +++ b/src/yb/master/xcluster/xcluster_outbound_replication_group-test.cc @@ -29,6 +29,8 @@ DECLARE_bool(TEST_enable_sync_points); DECLARE_bool(TEST_block_xcluster_checkpoint_namespace_task); +DECLARE_bool(TEST_xcluster_enable_ddl_replication); +DECLARE_bool(TEST_xcluster_enable_sequence_replication); using namespace std::placeholders; using testing::_; @@ -62,11 +64,13 @@ class XClusterOutboundReplicationGroupTaskFactoryMocked class XClusterOutboundReplicationGroupMocked : public XClusterOutboundReplicationGroup { public: explicit XClusterOutboundReplicationGroupMocked( - const xcluster::ReplicationGroupId& replication_group_id, HelperFunctions helper_functions, + const xcluster::ReplicationGroupId& replication_group_id, + const SysXClusterOutboundReplicationGroupEntryPB& outbound_replication_group_pb, + HelperFunctions helper_functions, XClusterOutboundReplicationGroupTaskFactoryMocked& task_factory) : XClusterOutboundReplicationGroup( - replication_group_id, {}, std::move(helper_functions), /*tasks_tracker=*/nullptr, - task_factory) { + replication_group_id, 
outbound_replication_group_pb, std::move(helper_functions), + /*tasks_tracker=*/nullptr, task_factory) { remote_client_ = std::make_shared(); } @@ -168,6 +172,29 @@ class XClusterOutboundReplicationGroupMockedTest : public YBTest { } } + void SetUp() { + YBTest::SetUp(); + ANNOTATE_UNPROTECTED_WRITE(FLAGS_TEST_xcluster_enable_ddl_replication) = + UseAutomaticMode(); + ANNOTATE_UNPROTECTED_WRITE(FLAGS_TEST_xcluster_enable_sequence_replication) = + UseAutomaticMode(); + } + + virtual bool UseAutomaticMode() { + // Except for parameterized tests, we currently default to semi-automatic mode. + return false; + } + + // How many extra streams/tables a namespace has + int OverheadStreamsCount() { + if (!UseAutomaticMode()) { + return 0; + } + // So far automatic mode has one extra stream for each namespace: sequences_data. + // TODO(jhe): increment this when you add the DDL queue table + return 1; + } + void CreateNamespace(const NamespaceName& namespace_name, const NamespaceId& namespace_id) { scoped_refptr ns = new NamespaceInfo(namespace_id, /*tasks_tracker=*/nullptr); auto l = ns->LockForWrite(); @@ -190,11 +217,13 @@ class XClusterOutboundReplicationGroupMockedTest : public YBTest { pb.set_table_type(PGSQL_TABLE_TYPE); l.Commit(); + std::lock_guard l2(mutex_); namespace_tables[namespace_id].push_back(table_info); return table_info; } void DropTable(const NamespaceId& namespace_id, const TableId& table_id) { + std::lock_guard l(mutex_); auto it = std::find_if( namespace_tables[namespace_id].begin(), namespace_tables[namespace_id].end(), [&table_id](const auto& table_info) { return table_info->id() == table_id; }); @@ -205,8 +234,10 @@ class XClusterOutboundReplicationGroupMockedTest : public YBTest { } std::shared_ptr CreateReplicationGroup() { + SysXClusterOutboundReplicationGroupEntryPB outbound_replication_group_pb{}; + outbound_replication_group_pb.set_automatic_ddl_mode(UseAutomaticMode()); return std::make_shared( - kReplicationGroupId, helper_functions, 
*task_factory); + kReplicationGroupId, outbound_replication_group_pb, helper_functions, *task_factory); } scoped_refptr CreateXClusterStream(const TableId& table_id) { @@ -215,7 +246,8 @@ class XClusterOutboundReplicationGroupMockedTest : public YBTest { return make_scoped_refptr(stream_id); } - std::unordered_map> namespace_tables; + mutable std::shared_mutex mutex_; + std::unordered_map> namespace_tables GUARDED_BY(mutex_); std::unordered_map> namespace_infos; std::unordered_set xcluster_streams; std::unique_ptr thread_pool; @@ -223,10 +255,26 @@ class XClusterOutboundReplicationGroupMockedTest : public YBTest { std::unique_ptr task_factory; XClusterOutboundReplicationGroup::HelperFunctions helper_functions = { + .create_sequences_data_table_func = + [this]() { + (void)CreateTable( + kPgSequencesDataNamespaceId, kPgSequencesDataTableId, "sequences_data", ""); + return Status::OK(); + }, .get_namespace_func = std::bind(&XClusterOutboundReplicationGroupMockedTest::GetNamespace, this, _1), .get_tables_func = - [this](const NamespaceId& namespace_id) { return namespace_tables[namespace_id]; }, + [this](const NamespaceId& namespace_id, bool include_sequences_data) { + std::lock_guard l(mutex_); + auto tables = namespace_tables[namespace_id]; + if (include_sequences_data) { + auto sequences_tables = namespace_tables[kPgSequencesDataNamespaceId]; + if (sequences_tables.size() > 0) { + tables.push_back(sequences_tables.back()); + } + } + return tables; + }, .create_xcluster_streams_func = [this](const std::vector& table_ids, const LeaderEpoch&) { auto create_context = std::make_unique(); @@ -277,35 +325,54 @@ class XClusterOutboundReplicationGroupMockedTest : public YBTest { void VerifyNamespaceCheckpointInfo( const TableId& table_id1, const TableId& table_id2, const NamespaceCheckpointInfo& ns_info, - bool skip_schema_name_check = false) { - ASSERT_FALSE(ns_info.initial_bootstrap_required); - ASSERT_EQ(ns_info.table_infos.size(), 2); + bool sequences_data_included, bool 
skip_schema_name_check = false) { + EXPECT_FALSE(ns_info.initial_bootstrap_required); + ASSERT_EQ(ns_info.table_infos.size(), sequences_data_included ? 3 : 2); std::set table_ids; for (const auto& table_info : ns_info.table_infos) { + SCOPED_TRACE("table name: " + table_info.table_name); if (table_info.table_name == kTableName1) { ASSERT_EQ(table_info.table_id, table_id1); } else if (table_info.table_name == kTableName2) { ASSERT_EQ(table_info.table_id, table_id2); + } else if (table_info.table_name == "sequences_data") { + ASSERT_EQ(table_info.table_id, kPgSequencesDataTableId); } else { FAIL() << "Unexpected table name: " << table_info.table_name; } - if (skip_schema_name_check) { - // Make sure it is not empty. - ASSERT_FALSE(table_info.pg_schema_name.empty()); + if (table_info.table_id != kPgSequencesDataTableId) { + if (skip_schema_name_check) { + // Make sure it is not empty. + EXPECT_FALSE(table_info.pg_schema_name.empty()); + } else { + EXPECT_EQ(table_info.pg_schema_name, kPgSchemaName); + } } else { - ASSERT_EQ(table_info.pg_schema_name, kPgSchemaName); + EXPECT_TRUE(table_info.pg_schema_name.empty()); } - ASSERT_FALSE(table_info.stream_id.IsNil()); - ASSERT_TRUE(xcluster_streams.contains(table_info.stream_id)); + EXPECT_FALSE(table_info.stream_id.IsNil()); + EXPECT_TRUE(xcluster_streams.contains(table_info.stream_id)); table_ids.insert(table_info.table_id); } - ASSERT_TRUE(table_ids.contains(table_id1)); - ASSERT_TRUE(table_ids.contains(table_id2)); + EXPECT_TRUE(table_ids.contains(table_id1)); + EXPECT_TRUE(table_ids.contains(table_id2)); } }; -TEST_F(XClusterOutboundReplicationGroupMockedTest, TestMultipleTable) { +class XClusterOutboundReplicationGroupMockedParameterized + : public XClusterOutboundReplicationGroupMockedTest, + public ::testing::WithParamInterface { + public: + bool UseAutomaticMode() override { return GetParam(); } +}; + +INSTANTIATE_TEST_CASE_P( + AutoMode, XClusterOutboundReplicationGroupMockedParameterized, 
::testing::Values(true)); +INSTANTIATE_TEST_CASE_P( + SemiMode, XClusterOutboundReplicationGroupMockedParameterized, ::testing::Values(false)); + +TEST_P(XClusterOutboundReplicationGroupMockedParameterized, TestMultipleTable) { CreateTable(kNamespaceId, kTableId1, kTableName1, kPgSchemaName); CreateTable(kNamespaceId, kTableId2, kTableName2, kPgSchemaName2); auto outbound_rg_ptr = CreateReplicationGroup(); @@ -318,17 +385,28 @@ TEST_F(XClusterOutboundReplicationGroupMockedTest, TestMultipleTable) { auto ns_info_opt = ASSERT_RESULT(outbound_rg.GetNamespaceCheckpointInfo(kNamespaceId)); ASSERT_TRUE(ns_info_opt.has_value()); - // We should have 2 streams now. - ASSERT_EQ(xcluster_streams.size(), 2); + if (UseAutomaticMode()) { + std::lock_guard l(mutex_); + // In automatic mode, sequences_data should have been created. + ASSERT_GT(namespace_tables[kPgSequencesDataNamespaceId].size(), 0); + } + + // We should have 2 streams for normal tables now. + ASSERT_EQ(xcluster_streams.size(), 2 + OverheadStreamsCount()); ASSERT_NO_FATALS(VerifyNamespaceCheckpointInfo( - kTableId1, kTableId2, *ns_info_opt, /*skip_schema_name_check=*/true)); + kTableId1, kTableId2, *ns_info_opt, /*sequences_data_included=*/UseAutomaticMode(), + /*skip_schema_name_check=*/true)); for (const auto& table_info : ns_info_opt->table_infos) { // Order is not deterministic so search with the table name. 
if (table_info.table_name == kTableName1) { - ASSERT_EQ(table_info.pg_schema_name, kPgSchemaName); + EXPECT_EQ(table_info.pg_schema_name, kPgSchemaName); + } else if (table_info.table_name == kTableName2) { + EXPECT_EQ(table_info.pg_schema_name, kPgSchemaName2); + } else if (table_info.table_name == "sequences_data") { + EXPECT_EQ(table_info.pg_schema_name, ""); } else { - ASSERT_EQ(table_info.pg_schema_name, kPgSchemaName2); + FAIL() << "unknown tablename " << table_info.table_name; } } @@ -338,7 +416,8 @@ TEST_F(XClusterOutboundReplicationGroupMockedTest, TestMultipleTable) { ASSERT_TRUE(ns_info_opt.has_value()); ASSERT_NO_FATALS(VerifyNamespaceCheckpointInfo( - kTableId1, kTableId2, *ns_info_opt, /*skip_schema_name_check=*/true)); + kTableId1, kTableId2, *ns_info_opt, /*sequences_data_included=*/false, + /*skip_schema_name_check=*/true)); ASSERT_EQ(ns_info_opt->table_infos[0].pg_schema_name, kPgSchemaName2); ASSERT_EQ(ns_info_opt->table_infos[1].pg_schema_name, kPgSchemaName); ASSERT_EQ(ns_info_opt->table_infos[0].table_name, kTableName2); @@ -355,7 +434,7 @@ TEST_F(XClusterOutboundReplicationGroupMockedTest, TestMultipleTable) { ASSERT_TRUE(xcluster_streams.empty()); } -TEST_F(XClusterOutboundReplicationGroupMockedTest, AddDeleteNamespaces) { +TEST_P(XClusterOutboundReplicationGroupMockedParameterized, AddDeleteNamespaces) { CreateTable(kNamespaceId, kTableId1, kTableName1, kPgSchemaName); CreateTable(kNamespaceId, kTableId2, kTableName2, kPgSchemaName); @@ -389,8 +468,8 @@ TEST_F(XClusterOutboundReplicationGroupMockedTest, AddDeleteNamespaces) { ASSERT_OK(outbound_rg.AddNamespacesSync(kEpoch, {kNamespaceId}, kTimeout)); - // We should have 2 streams now. - ASSERT_EQ(xcluster_streams.size(), 2); + // We should have 2 normal streams now. + EXPECT_EQ(xcluster_streams.size(), 2 + OverheadStreamsCount()); auto xcluster_streams_initial = xcluster_streams; // Make sure invalid namespace id is handled correctly. 
@@ -401,13 +480,14 @@ TEST_F(XClusterOutboundReplicationGroupMockedTest, AddDeleteNamespaces) { auto ns1_info_opt = ASSERT_RESULT(outbound_rg.GetNamespaceCheckpointInfo(kNamespaceId)); ASSERT_TRUE(ns1_info_opt.has_value()); - ASSERT_NO_FATALS(VerifyNamespaceCheckpointInfo(kTableId1, kTableId2, *ns1_info_opt)); + ASSERT_NO_FATALS(VerifyNamespaceCheckpointInfo( + kTableId1, kTableId2, *ns1_info_opt, /*sequences_data_included=*/UseAutomaticMode())); // Add the second namespace. ASSERT_OK(outbound_rg.AddNamespaceSync(kEpoch, namespace_id_2, kTimeout)); - // We should have 4 streams now. - ASSERT_EQ(xcluster_streams.size(), 4); + // We should have 4 normal streams now. + ASSERT_EQ(xcluster_streams.size(), 4 + 2 * OverheadStreamsCount()); // The info of the first namespace should not change. auto ns1_info_dup = ASSERT_RESULT(outbound_rg.GetNamespaceCheckpointInfo(kNamespaceId)); @@ -417,16 +497,18 @@ TEST_F(XClusterOutboundReplicationGroupMockedTest, AddDeleteNamespaces) { // Validate the seconds namespace. auto ns2_info_opt = ASSERT_RESULT(outbound_rg.GetNamespaceCheckpointInfo(namespace_id_2)); ASSERT_TRUE(ns2_info_opt.has_value()); - ASSERT_NO_FATALS(VerifyNamespaceCheckpointInfo(ns2_table_id_1, ns2_table_id_2, *ns2_info_opt)); + ASSERT_NO_FATALS(VerifyNamespaceCheckpointInfo( + ns2_table_id_1, ns2_table_id_2, *ns2_info_opt, + /*sequences_data_included=*/UseAutomaticMode())); ASSERT_OK(outbound_rg.RemoveNamespace(kEpoch, kNamespaceId, /*target_master_addresses=*/{})); ASSERT_FALSE(outbound_rg.HasNamespace(kNamespaceId)); ASSERT_NOK(outbound_rg.GetNamespaceCheckpointInfo(kNamespaceId)); // We should only have only the streams from second namespace. - ASSERT_EQ(xcluster_streams.size(), 2); + ASSERT_EQ(xcluster_streams.size(), 2 + OverheadStreamsCount()); - // new_xcluster_streams and all_xcluster_streams should not overlap. + // New_xcluster_streams and all_xcluster_streams should not overlap. 
for (const auto& stream : xcluster_streams) { ASSERT_FALSE(xcluster_streams_initial.contains(stream)); } @@ -443,7 +525,7 @@ TEST_F(XClusterOutboundReplicationGroupMockedTest, AddDeleteNamespaces) { ASSERT_TRUE(xcluster_streams.empty()); } -TEST_F(XClusterOutboundReplicationGroupMockedTest, CreateTargetReplicationGroup) { +TEST_P(XClusterOutboundReplicationGroupMockedParameterized, CreateTargetReplicationGroup) { CreateTable(kNamespaceId, kTableId1, kTableName1, kPgSchemaName); auto outbound_rg_ptr = CreateReplicationGroup(); @@ -452,12 +534,18 @@ TEST_F(XClusterOutboundReplicationGroupMockedTest, CreateTargetReplicationGroup) ASSERT_OK(outbound_rg.AddNamespaceSync(kEpoch, kNamespaceId, kTimeout)); - std::vector streams{xcluster_streams.begin(), xcluster_streams.end()}; + std::vector expected_streams{xcluster_streams.begin(), xcluster_streams.end()}; + std::vector expected_tables{kTableId1}; + if (UseAutomaticMode()) { + expected_tables.push_back(kPgSequencesDataTableId); + } EXPECT_CALL( - xcluster_client, SetupDbScopedUniverseReplication( - kReplicationGroupId, _, std::vector{kNamespaceName}, - std::vector{kNamespaceId}, std::vector{kTableId1}, - streams, /*automatic_ddl_mode=*/false)) + xcluster_client, + SetupDbScopedUniverseReplication( + kReplicationGroupId, _, std::vector{kNamespaceName}, + std::vector{kNamespaceId}, + ::testing::UnorderedElementsAreArray(expected_tables), + ::testing::UnorderedElementsAreArray(expected_streams), UseAutomaticMode())) .Times(AtLeast(1)); ASSERT_OK(outbound_rg.CreateXClusterReplication({}, {}, kEpoch)); @@ -512,7 +600,7 @@ TEST_F(XClusterOutboundReplicationGroupMockedTest, CreateTargetReplicationGroup) SysXClusterOutboundReplicationGroupEntryPB::TargetUniverseInfoPB::REPLICATING); } -TEST_F(XClusterOutboundReplicationGroupMockedTest, AddTable) { +TEST_P(XClusterOutboundReplicationGroupMockedParameterized, AddTable) { auto table_info1 = CreateTable(kNamespaceId, kTableId1, kTableName1, kPgSchemaName); CreateTable(kNamespaceId, 
kTableId2, kTableName2, kPgSchemaName2); @@ -520,15 +608,15 @@ TEST_F(XClusterOutboundReplicationGroupMockedTest, AddTable) { ASSERT_OK(outbound_rg->AddNamespaceSync(kEpoch, kNamespaceId, kTimeout)); ASSERT_TRUE(outbound_rg->HasNamespace(kNamespaceId)); - ASSERT_EQ(xcluster_streams.size(), 2); + EXPECT_EQ(xcluster_streams.size(), 2 + OverheadStreamsCount()); auto ns_info = ASSERT_RESULT(outbound_rg->GetNamespaceCheckpointInfo(kNamespaceId)); - ASSERT_EQ(ns_info->table_infos.size(), 2); + EXPECT_EQ(ns_info->table_infos.size(), 2 + OverheadStreamsCount()); // Same table should not get added twice. ASSERT_OK(outbound_rg->AddTable(table_info1, kEpoch)); - ASSERT_EQ(ns_info->table_infos.size(), 2); + ASSERT_EQ(ns_info->table_infos.size(), 2 + OverheadStreamsCount()); const TableName table_3 = "table3"; const TableId table_id_3 = "table_id_3"; @@ -536,10 +624,10 @@ TEST_F(XClusterOutboundReplicationGroupMockedTest, AddTable) { ASSERT_OK(outbound_rg->AddTable(table_info3, kEpoch)); - ASSERT_EQ(xcluster_streams.size(), 3); + ASSERT_EQ(xcluster_streams.size(), 3 + OverheadStreamsCount()); ns_info = ASSERT_RESULT(outbound_rg->GetNamespaceCheckpointInfo(kNamespaceId)); ASSERT_TRUE(ns_info.has_value()); - ASSERT_EQ(ns_info->table_infos.size(), 3); + ASSERT_EQ(ns_info->table_infos.size(), 3 + OverheadStreamsCount()); } // If we create a table during checkpoint, it should fail. 
diff --git a/src/yb/master/xcluster/xcluster_outbound_replication_group.cc b/src/yb/master/xcluster/xcluster_outbound_replication_group.cc index df581c84a245..054f408b85a7 100644 --- a/src/yb/master/xcluster/xcluster_outbound_replication_group.cc +++ b/src/yb/master/xcluster/xcluster_outbound_replication_group.cc @@ -12,9 +12,10 @@ // #include "yb/master/xcluster/xcluster_outbound_replication_group.h" -#include "yb/common/xcluster_util.h" + #include "yb/client/xcluster_client.h" #include "yb/common/colocated_util.h" +#include "yb/common/xcluster_util.h" #include "yb/master/catalog_entity_info.h" #include "yb/master/xcluster/xcluster_outbound_replication_group_tasks.h" #include "yb/util/is_operation_done_result.h" @@ -24,6 +25,8 @@ DEFINE_RUNTIME_uint32(max_xcluster_streams_to_checkpoint_in_parallel, 200, "Maximum number of xCluster streams to checkpoint in parallel"); +DECLARE_bool(TEST_xcluster_enable_sequence_replication); + using namespace std::placeholders; namespace yb::master { @@ -77,6 +80,7 @@ XClusterOutboundReplicationGroup::XClusterOutboundReplicationGroup( : CatalogEntityWithTasks(std::move(tasks_tracker)), helper_functions_(std::move(helper_functions)), task_factory_(task_factory) { + automatic_ddl_mode_ = outbound_replication_group_pb.automatic_ddl_mode(); outbound_rg_info_ = std::make_unique(replication_group_id); outbound_rg_info_->Load(outbound_replication_group_pb); } @@ -263,7 +267,9 @@ Result XClusterOutboundReplicationGroup::MarkBootstrapTablesAsCheckpointed "Namespace in unexpected state"); if (table_ids.empty()) { - auto table_infos = VERIFY_RESULT(helper_functions_.get_tables_func(namespace_id)); + auto table_infos = VERIFY_RESULT(helper_functions_.get_tables_func( + namespace_id, /*include_sequences_data=*/( + AutomaticDDLMode() && FLAGS_TEST_xcluster_enable_sequence_replication))); std::set tables; std::transform( table_infos.begin(), table_infos.end(), std::inserter(tables, tables.begin()), @@ -342,7 +348,9 @@ Result 
XClusterOutboundReplicationGroup::GetNamespaceName( Result XClusterOutboundReplicationGroup::CreateNamespaceInfo( const NamespaceId& namespace_id, const LeaderEpoch& epoch) { - auto table_infos = VERIFY_RESULT(helper_functions_.get_tables_func(namespace_id)); + auto table_infos = VERIFY_RESULT(helper_functions_.get_tables_func( + namespace_id, /*include_sequences_data=*/( + AutomaticDDLMode() && FLAGS_TEST_xcluster_enable_sequence_replication))); VLOG_WITH_PREFIX_AND_FUNC(1) << "Tables: " << yb::ToString(table_infos); SCHECK( @@ -378,6 +386,23 @@ XClusterOutboundReplicationGroup::CreateNamespaceInfo( return ns_info; } +Status XClusterOutboundReplicationGroup::AddTableToInitialBootstrapMapping( + const NamespaceId& namespace_id, const TableId& table_id, const LeaderEpoch& epoch) { + std::lock_guard mutex_lock(mutex_); + auto l = VERIFY_RESULT(LockForWrite()); + + auto* ns_info = VERIFY_RESULT(GetNamespaceInfo(namespace_id)); + if (ns_info->mutable_table_infos()->count(table_id) > 0) { + return Status::OK(); + } + SysXClusterOutboundReplicationGroupEntryPB::NamespaceInfoPB::TableInfoPB table_info; + table_info.set_is_checkpointing(true); + table_info.set_is_part_of_initial_bootstrap(true); + ns_info->mutable_table_infos()->insert({table_id, std::move(table_info)}); + + return Upsert(l, epoch); +} + Result XClusterOutboundReplicationGroup::AddNamespaceInternal( const NamespaceId& namespace_id, XClusterOutboundReplicationGroupInfo::WriteLock& l, const LeaderEpoch& epoch) { @@ -576,7 +601,9 @@ XClusterOutboundReplicationGroup::GetNamespaceCheckpointInfo( NamespaceCheckpointInfo ns_info; ns_info.initial_bootstrap_required = namespace_info->initial_bootstrap_required(); - auto all_tables = VERIFY_RESULT(helper_functions_.get_tables_func(namespace_id)); + auto all_tables = VERIFY_RESULT(helper_functions_.get_tables_func( + namespace_id, /*include_sequences_data=*/( + AutomaticDDLMode() && FLAGS_TEST_xcluster_enable_sequence_replication))); std::vector> table_infos; if 
(!table_names.empty()) { @@ -1147,10 +1174,8 @@ Result XClusterOutboundReplicationGroup::GetStreamId( return table_info->stream_id(); } -Result XClusterOutboundReplicationGroup::AutomaticDDLMode() const { - SharedLock mutex_lock(mutex_); - auto l = VERIFY_RESULT(LockForRead()); - return l->pb.automatic_ddl_mode(); +bool XClusterOutboundReplicationGroup::AutomaticDDLMode() const { + return automatic_ddl_mode_; } } // namespace yb::master diff --git a/src/yb/master/xcluster/xcluster_outbound_replication_group.h b/src/yb/master/xcluster/xcluster_outbound_replication_group.h index 62c46b1ecdf2..bfd04590488d 100644 --- a/src/yb/master/xcluster/xcluster_outbound_replication_group.h +++ b/src/yb/master/xcluster/xcluster_outbound_replication_group.h @@ -13,13 +13,13 @@ #pragma once -#include "yb/master/xcluster/master_xcluster_types.h" -#include "yb/master/xcluster/xcluster_catalog_entity.h" - #include "yb/cdc/xcluster_types.h" #include "yb/gutil/thread_annotations.h" +#include "yb/master/xcluster/master_xcluster_types.h" +#include "yb/master/xcluster/xcluster_catalog_entity.h" + namespace yb { class IsOperationDoneResult; @@ -37,9 +37,12 @@ class XClusterOutboundReplicationGroup public CatalogEntityWithTasks { public: struct HelperFunctions { + const std::function create_sequences_data_table_func; const std::function>(const NamespaceIdentifierPB&)> get_namespace_func; - const std::function>(const NamespaceId&)> get_tables_func; + const std::function>( + const NamespaceId&, bool include_sequences_data)> + get_tables_func; const std::function>( const std::vector&, const LeaderEpoch&)> create_xcluster_streams_func; @@ -138,7 +141,7 @@ class XClusterOutboundReplicationGroup Result GetStreamId(const NamespaceId& namespace_id, const TableId& table_id) const EXCLUDES(mutex_); - Result AutomaticDDLMode() const EXCLUDES(mutex_); + bool AutomaticDDLMode() const; private: friend class XClusterOutboundReplicationGroupMocked; @@ -185,6 +188,10 @@ class XClusterOutboundReplicationGroup 
Result CreateNamespaceInfo( const NamespaceId& namespace_id, const LeaderEpoch& epoch) REQUIRES(mutex_); + Status AddTableToInitialBootstrapMapping( + const NamespaceId& namespace_id, const TableId& table_id, const LeaderEpoch& epoch) + EXCLUDES(mutex_); + // Returns the NamespaceInfoPB for the given namespace_id. If its not found returns a NotFound // status. Caller must hold the WriteLock. Result GetNamespaceInfo(const NamespaceId& namespace_id) REQUIRES(mutex_); @@ -251,6 +258,8 @@ class XClusterOutboundReplicationGroup XClusterOutboundReplicationGroupTaskFactory& task_factory_; + bool automatic_ddl_mode_; + DISALLOW_COPY_AND_ASSIGN(XClusterOutboundReplicationGroup); }; diff --git a/src/yb/master/xcluster/xcluster_outbound_replication_group_tasks.cc b/src/yb/master/xcluster/xcluster_outbound_replication_group_tasks.cc index fc95c4902c16..c090980a109b 100644 --- a/src/yb/master/xcluster/xcluster_outbound_replication_group_tasks.cc +++ b/src/yb/master/xcluster/xcluster_outbound_replication_group_tasks.cc @@ -19,6 +19,8 @@ DEFINE_test_flag(bool, block_xcluster_checkpoint_namespace_task, false, "When enabled XClusterCheckpointNamespaceTask will be blocked"); +DECLARE_bool(TEST_xcluster_enable_sequence_replication); + using namespace std::placeholders; namespace yb::master { @@ -63,6 +65,21 @@ std::string XClusterCheckpointNamespaceTask::description() const { } Status XClusterCheckpointNamespaceTask::FirstStep() { + if (outbound_replication_group_.AutomaticDDLMode() && + FLAGS_TEST_xcluster_enable_sequence_replication) { + // Ensure sequences_data table has been created and added to our tables to checkpoint. + // TODO: Consider making this async so we don't have to burn a thread waiting. 
+ RETURN_NOT_OK(outbound_replication_group_.helper_functions_.create_sequences_data_table_func()); + RETURN_NOT_OK(outbound_replication_group_.AddTableToInitialBootstrapMapping( + namespace_id_, kPgSequencesDataTableId, epoch_)); + } + + ScheduleNextStep( + std::bind(&XClusterCheckpointNamespaceTask::CreateStreams, this), "CreateStreams"); + return Status::OK(); +} + +Status XClusterCheckpointNamespaceTask::CreateStreams() { RETURN_NOT_OK( outbound_replication_group_.CreateStreamsForInitialBootstrap(namespace_id_, epoch_)); ScheduleNextStep( diff --git a/src/yb/master/xcluster/xcluster_outbound_replication_group_tasks.h b/src/yb/master/xcluster/xcluster_outbound_replication_group_tasks.h index c74d83bc00b8..9ced3b6efca8 100644 --- a/src/yb/master/xcluster/xcluster_outbound_replication_group_tasks.h +++ b/src/yb/master/xcluster/xcluster_outbound_replication_group_tasks.h @@ -45,6 +45,7 @@ class XClusterCheckpointNamespaceTask : public MultiStepCatalogEntityTask { Status FirstStep() override; private: + Status CreateStreams(); Status CheckpointStreams(); void CheckpointStreamsCallback(XClusterCheckpointStreamsResult result); Status MarkTablesAsCheckpointed(XClusterCheckpointStreamsResult result); diff --git a/src/yb/master/xcluster/xcluster_source_manager.cc b/src/yb/master/xcluster/xcluster_source_manager.cc index 96b6b3e2c6b1..61391bb02efc 100644 --- a/src/yb/master/xcluster/xcluster_source_manager.cc +++ b/src/yb/master/xcluster/xcluster_source_manager.cc @@ -17,24 +17,29 @@ #include "yb/cdc/cdc_service.proxy.h" #include "yb/cdc/cdc_state_table.h" #include "yb/cdc/xcluster_types.h" + #include "yb/client/xcluster_client.h" #include "yb/common/xcluster_util.h" + #include "yb/master/catalog_manager.h" #include "yb/master/master.h" -#include "yb/master/xcluster/master_xcluster_util.h" -#include "yb/master/xcluster/xcluster_status.h" -#include "yb/util/is_operation_done_result.h" #include "yb/master/xcluster/add_table_to_xcluster_source_task.h" +#include 
"yb/master/xcluster/master_xcluster_util.h" #include "yb/master/xcluster/xcluster_catalog_entity.h" #include "yb/master/xcluster/xcluster_outbound_replication_group.h" #include "yb/master/xcluster/xcluster_outbound_replication_group_tasks.h" +#include "yb/master/xcluster/xcluster_status.h" + +#include "yb/tserver/pg_create_table.h" +#include "yb/util/is_operation_done_result.h" #include "yb/util/scope_exit.h" DEFINE_RUNTIME_bool(enable_tablet_split_of_xcluster_bootstrapping_tables, false, "When set, it enables automatic tablet splitting for tables that are part of an " "xCluster replication setup and are currently being bootstrapped for xCluster."); +DECLARE_int32(master_yb_client_default_timeout_ms); DECLARE_uint32(cdc_wal_retention_time_secs); DECLARE_bool(TEST_disable_cdc_state_insert_on_setup); @@ -185,13 +190,22 @@ XClusterSourceManager::InitOutboundReplicationGroup( const xcluster::ReplicationGroupId& replication_group_id, const SysXClusterOutboundReplicationGroupEntryPB& metadata) { XClusterOutboundReplicationGroup::HelperFunctions helper_functions = { + .create_sequences_data_table_func = + [client = master_.client_future()]() { + return tserver::CreateSequencesDataTable( + client.get(), + CoarseMonoClock::now() + + MonoDelta::FromMilliseconds(FLAGS_master_yb_client_default_timeout_ms)); + }, .get_namespace_func = [&catalog_manager = catalog_manager_](const NamespaceIdentifierPB& ns_identifier) { return catalog_manager.FindNamespace(ns_identifier); }, .get_tables_func = - [&catalog_manager = catalog_manager_](const NamespaceId& namespace_id) { - return GetTablesEligibleForXClusterReplication(catalog_manager, namespace_id); + [&catalog_manager = catalog_manager_]( + const NamespaceId& namespace_id, bool include_sequences_data) { + return GetTablesEligibleForXClusterReplication( + catalog_manager, namespace_id, include_sequences_data); }, .create_xcluster_streams_func = std::bind(&XClusterSourceManager::CreateStreamsForDbScoped, this, _1, _2), @@ -1158,7 
+1172,7 @@ XClusterSourceManager::GetXClusterOutboundReplicationGroupInfo( } result.namespace_table_map[namespace_id] = std::move(ns_info); } - result.automatic_ddl_mode = VERIFY_RESULT(outbound_replication_group->AutomaticDDLMode()); + result.automatic_ddl_mode = outbound_replication_group->AutomaticDDLMode(); return result; } From 9d4b6e09830d1dfdfeea84456d84658643ab6232 Mon Sep 17 00:00:00 2001 From: Nikhil Bhatia Date: Thu, 12 Sep 2024 22:31:57 +0000 Subject: [PATCH 22/75] [PLAT-15302] Remove parallel query from enhanced PG parity gflags Summary: This change removes parallel query from the enhanced PG parity gflag group from all deployments. This change has been done based on the recommendations from the LRT/DST teams. Test Plan: Create a universe and ensure that parallel query is not present in the enhanced PG parity set of flags Reviewers: yshchetinin, sneelakantan, dkumar Reviewed By: dkumar Subscribers: yugaware Differential Revision: https://phorge.dev.yugabyte.com/D38024 --- managed/src/main/resources/gflag_groups/2.23/gflag_groups.json | 2 +- .../src/main/resources/gflag_groups/2024.1.3/gflag_groups.json | 2 +- .../src/main/resources/gflag_groups/2024.1/gflag_groups.json | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/managed/src/main/resources/gflag_groups/2.23/gflag_groups.json b/managed/src/main/resources/gflag_groups/2.23/gflag_groups.json index 682ddc68a851..45802ea2049a 100644 --- a/managed/src/main/resources/gflag_groups/2.23/gflag_groups.json +++ b/managed/src/main/resources/gflag_groups/2.23/gflag_groups.json @@ -6,7 +6,7 @@ "TSERVER": { "yb_enable_read_committed_isolation": "true", "ysql_enable_read_request_caching": "true", - "ysql_pg_conf_csv": "yb_enable_base_scans_cost_model=true,yb_enable_optimizer_statistics=true,yb_bnl_batch_size=1024,yb_parallel_range_rows=10000,yb_fetch_row_limit=0,yb_fetch_size_limit='1MB',yb_use_hash_splitting_by_default=false" + "ysql_pg_conf_csv": 
"yb_enable_base_scans_cost_model=true,yb_enable_optimizer_statistics=true,yb_bnl_batch_size=1024,yb_fetch_row_limit=0,yb_fetch_size_limit='1MB',yb_use_hash_splitting_by_default=false" } } } diff --git a/managed/src/main/resources/gflag_groups/2024.1.3/gflag_groups.json b/managed/src/main/resources/gflag_groups/2024.1.3/gflag_groups.json index 3f4e5694674a..c0d3b57642ad 100644 --- a/managed/src/main/resources/gflag_groups/2024.1.3/gflag_groups.json +++ b/managed/src/main/resources/gflag_groups/2024.1.3/gflag_groups.json @@ -6,7 +6,7 @@ "TSERVER": { "yb_enable_read_committed_isolation": "true", "ysql_enable_read_request_caching": "true", - "ysql_pg_conf_csv": "yb_enable_base_scans_cost_model=true,yb_enable_optimizer_statistics=true,yb_bnl_batch_size=1024,yb_parallel_range_rows=10000,yb_enable_bitmapscan=true,yb_use_hash_splitting_by_default=false" + "ysql_pg_conf_csv": "yb_enable_base_scans_cost_model=true,yb_enable_optimizer_statistics=true,yb_bnl_batch_size=1024,yb_enable_bitmapscan=true,yb_use_hash_splitting_by_default=false" } } } diff --git a/managed/src/main/resources/gflag_groups/2024.1/gflag_groups.json b/managed/src/main/resources/gflag_groups/2024.1/gflag_groups.json index 682ddc68a851..45802ea2049a 100644 --- a/managed/src/main/resources/gflag_groups/2024.1/gflag_groups.json +++ b/managed/src/main/resources/gflag_groups/2024.1/gflag_groups.json @@ -6,7 +6,7 @@ "TSERVER": { "yb_enable_read_committed_isolation": "true", "ysql_enable_read_request_caching": "true", - "ysql_pg_conf_csv": "yb_enable_base_scans_cost_model=true,yb_enable_optimizer_statistics=true,yb_bnl_batch_size=1024,yb_parallel_range_rows=10000,yb_fetch_row_limit=0,yb_fetch_size_limit='1MB',yb_use_hash_splitting_by_default=false" + "ysql_pg_conf_csv": "yb_enable_base_scans_cost_model=true,yb_enable_optimizer_statistics=true,yb_bnl_batch_size=1024,yb_fetch_row_limit=0,yb_fetch_size_limit='1MB',yb_use_hash_splitting_by_default=false" } } } From d50282de29cfc0d6a8c747e10b7b4752bad5bdc2 Mon Sep 17 
00:00:00 2001 From: Dwight Hodge <79169168+ddhodge@users.noreply.github.com> Date: Sun, 15 Sep 2024 21:09:27 -0400 Subject: [PATCH 23/75] [doc] Fix RSS link (#23932) * RSS link * fix appversion --- docs/content/preview/releases/yba-releases/v2.21.md | 2 +- docs/content/preview/releases/yba-releases/v2.23.md | 2 +- docs/content/preview/releases/yba-releases/v2024.1.md | 2 +- .../content/preview/releases/ybdb-releases/end-of-life/v2.11.md | 2 +- .../content/preview/releases/ybdb-releases/end-of-life/v2.12.md | 2 +- .../content/preview/releases/ybdb-releases/end-of-life/v2.13.md | 2 +- docs/content/preview/releases/ybdb-releases/v2.21.md | 2 +- docs/content/preview/releases/ybdb-releases/v2.23.md | 2 +- docs/content/preview/releases/ybdb-releases/v2024.1.md | 2 +- docs/data/currentVersions.json | 2 +- 10 files changed, 10 insertions(+), 10 deletions(-) diff --git a/docs/content/preview/releases/yba-releases/v2.21.md b/docs/content/preview/releases/yba-releases/v2.21.md index 049cadfe4c69..54d6f38c77a3 100644 --- a/docs/content/preview/releases/yba-releases/v2.21.md +++ b/docs/content/preview/releases/yba-releases/v2.21.md @@ -15,7 +15,7 @@ type: docs What follows are the release notes for all releases in the YugabyteDB Anywhere (YBA) v2.21 series. Content will be added as new notable features and changes are available in the patch releases of the v2.21 series. -For an RSS feed of all release series, point your feed reader to the [RSS feed for releases](../../index.xml). +For an RSS feed of all release series, point your feed reader to the [RSS feed for releases](../index.xml). 
## v2.21.1.0 - June 13, 2024 {#v2.21.1.0} diff --git a/docs/content/preview/releases/yba-releases/v2.23.md b/docs/content/preview/releases/yba-releases/v2.23.md index 85100eadd984..0612c3187143 100644 --- a/docs/content/preview/releases/yba-releases/v2.23.md +++ b/docs/content/preview/releases/yba-releases/v2.23.md @@ -13,7 +13,7 @@ type: docs What follows are the release notes for all releases in the YugabyteDB Anywhere (YBA) v2.23 series. Content will be added as new notable features and changes are available in the patch releases of the v2.23 series. -For an RSS feed of all release series, point your feed reader to the [RSS feed for releases](../../index.xml). +For an RSS feed of all release series, point your feed reader to the [RSS feed for releases](../index.xml). ## v2.23.0.0 - September 13, 2024 {#v2.23.0.0} diff --git a/docs/content/preview/releases/yba-releases/v2024.1.md b/docs/content/preview/releases/yba-releases/v2024.1.md index 9e116c2e1597..89d33ad333a4 100644 --- a/docs/content/preview/releases/yba-releases/v2024.1.md +++ b/docs/content/preview/releases/yba-releases/v2024.1.md @@ -13,7 +13,7 @@ type: docs What follows are the release notes for all releases in the **YugabyteDB Anywhere** (YBA) v2024.1 series. Content will be added as new notable features and changes are available in the patch releases of the YBA v2024.1 series. -For an RSS feed of all release series, point your feed reader to the [RSS feed for releases](../../index.xml). +For an RSS feed of all release series, point your feed reader to the [RSS feed for releases](../index.xml). {{}} The 2024.1 release series is not available for installation using Replicated. 
diff --git a/docs/content/preview/releases/ybdb-releases/end-of-life/v2.11.md b/docs/content/preview/releases/ybdb-releases/end-of-life/v2.11.md index 734abe97e035..197fe005f552 100644 --- a/docs/content/preview/releases/ybdb-releases/end-of-life/v2.11.md +++ b/docs/content/preview/releases/ybdb-releases/end-of-life/v2.11.md @@ -17,7 +17,7 @@ type: docs Included here are the release notes for all releases in the v2.11 release series. Content will be added as new notable features and changes are available in the patch releases of the v2.11 release series. -For an RSS feed of all release series to track the latest product updates, point your feed reader to the [RSS feed for releases](../../../index.xml). +For an RSS feed of all release series to track the latest product updates, point your feed reader to the [RSS feed for releases](../../index.xml). ## v2.11.2.0 - January 26, 2022 {#v2.11.2.0} diff --git a/docs/content/preview/releases/ybdb-releases/end-of-life/v2.12.md b/docs/content/preview/releases/ybdb-releases/end-of-life/v2.12.md index e24b8cf5c5f9..8e3f5acc8a39 100644 --- a/docs/content/preview/releases/ybdb-releases/end-of-life/v2.12.md +++ b/docs/content/preview/releases/ybdb-releases/end-of-life/v2.12.md @@ -19,7 +19,7 @@ type: docs Included here are the release notes for the v2.12 release series. Content will be added as new notable features and changes are available in the patch releases of the v2.12 release series. -For an RSS feed of all release series to track the latest product updates, point your feed reader to the [RSS feed for releases](../../../index.xml). +For an RSS feed of all release series to track the latest product updates, point your feed reader to the [RSS feed for releases](../../index.xml). 
## v2.12.12.0 - February 13, 2023 {#v2.12.12.0} diff --git a/docs/content/preview/releases/ybdb-releases/end-of-life/v2.13.md b/docs/content/preview/releases/ybdb-releases/end-of-life/v2.13.md index 25fbfc54f681..57a1e01dce64 100644 --- a/docs/content/preview/releases/ybdb-releases/end-of-life/v2.13.md +++ b/docs/content/preview/releases/ybdb-releases/end-of-life/v2.13.md @@ -17,7 +17,7 @@ type: docs Included here are the release notes for all releases in the v2.13 release series. Content will be added as new notable features and changes are available in the patch releases of the v2.13 release series. -For an RSS feed of all release series to track the latest product updates, point your feed reader to the [RSS feed for releases](../../../index.xml). +For an RSS feed of all release series to track the latest product updates, point your feed reader to the [RSS feed for releases](../../index.xml). {{< tip title="Product name changes" >}} Starting with v2.13.1.0, the names of some products have changed. Yugabyte Cloud is now **YugabyteDB Aeon**, and Yugabyte Platform is now **YugabyteDB Anywhere**. diff --git a/docs/content/preview/releases/ybdb-releases/v2.21.md b/docs/content/preview/releases/ybdb-releases/v2.21.md index bd3e40196562..2229b8fff659 100644 --- a/docs/content/preview/releases/ybdb-releases/v2.21.md +++ b/docs/content/preview/releases/ybdb-releases/v2.21.md @@ -22,7 +22,7 @@ type: docs What follows are the release notes for the YugabyteDB v2.21 release series. Content will be added as new notable features and changes are available in the patch releases of the YugabyteDB v2.21 release series. -For an RSS feed of all release series, point your feed reader to the [RSS feed for releases](../../index.xml). +For an RSS feed of all release series, point your feed reader to the [RSS feed for releases](../index.xml). 
{{}} YugabyteDB 2.21.0.0 and newer releases do not support v7 Linux versions (CentOS7, Red Hat Enterprise Linux 7, Oracle Enterprise Linux 7.x), Amazon Linux 2, and Ubuntu 18. If you're currently using one of these Linux versions, upgrade to a supported OS version before installing YugabyteDB v2.21. Refer to [Operating system support](../../../reference/configuration/operating-systems/) for the complete list of supported operating systems. diff --git a/docs/content/preview/releases/ybdb-releases/v2.23.md b/docs/content/preview/releases/ybdb-releases/v2.23.md index ddaf8f97aa14..b74d0620048d 100644 --- a/docs/content/preview/releases/ybdb-releases/v2.23.md +++ b/docs/content/preview/releases/ybdb-releases/v2.23.md @@ -13,7 +13,7 @@ type: docs What follows are the release notes for the YugabyteDB v2.23 release series. Content will be added as new notable features and changes are available in the patch releases of the YugabyteDB v2.23 release series. -For an RSS feed of all release series, point your feed reader to the [RSS feed for releases](../../index.xml). +For an RSS feed of all release series, point your feed reader to the [RSS feed for releases](../index.xml). ## v2.23.0.0 - September 13, 2024 {#v2.23.0.0} diff --git a/docs/content/preview/releases/ybdb-releases/v2024.1.md b/docs/content/preview/releases/ybdb-releases/v2024.1.md index 11a1828d3454..c31b9dc7a542 100644 --- a/docs/content/preview/releases/ybdb-releases/v2024.1.md +++ b/docs/content/preview/releases/ybdb-releases/v2024.1.md @@ -15,7 +15,7 @@ type: docs What follows are the release notes for the YugabyteDB 2024.1 release series. Content will be added as new notable features and changes are available in the patch releases of the YugabyteDB 2024.1 release series. -For an RSS feed of all release series, point your feed reader to the [RSS feed for releases](../../index.xml). +For an RSS feed of all release series, point your feed reader to the [RSS feed for releases](../index.xml). 
{{}} YugabyteDB 2024.1.0.0 and newer releases do not support v7 Linux versions (CentOS7, Red Hat Enterprise Linux 7, Oracle Enterprise Linux 7.x), Amazon Linux 2, and Ubuntu 18. If you're currently using one of these Linux versions, upgrade to a supported OS version before installing YugabyteDB v2024.1.0. Refer to [Operating system support](/stable/reference/configuration/operating-systems/) for the complete list of supported operating systems. diff --git a/docs/data/currentVersions.json b/docs/data/currentVersions.json index fea50b7584eb..e7643454d88c 100644 --- a/docs/data/currentVersions.json +++ b/docs/data/currentVersions.json @@ -7,7 +7,7 @@ "display": "v2.23 (Preview)", "version": "2.23.0.0", "versionShort": "2.23.0", - "appVersion": "2.23.0.0-b711", + "appVersion": "2.23.0.0-b710", "isStable": false, "initialRelease": "2024-09-13" }, From 6990c7ad437586a50f363587b3f242b382ad2c4e Mon Sep 17 00:00:00 2001 From: Naorem Khogendro Singh Date: Fri, 13 Sep 2024 13:26:29 -0700 Subject: [PATCH 24/75] [PLAT-15306] Update YBA node agent TLS certs to use strong ciphers Summary: Default TLS config for golang accepts even insecure cipher suites. This filters out the insecure ones. Test Plan: Before the fix: ``` ns-mbp-jmd6n:experiment nkhogen$ nmap -sV --script ssl-enum-ciphers -p 9070 10.9.75.20 Starting Nmap 7.94 ( https://nmap.org ) at 2024-09-13 13:17 PDT Nmap scan report for 10.9.75.20 Host is up (0.036s latency). 
PORT STATE SERVICE VERSION 9070/tcp open ssl/http Golang net/http server (Go-IPFS json-rpc or InfluxDB API) | ssl-enum-ciphers: | TLSv1.2: | ciphers: | TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 (secp256r1) - A | TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 (secp256r1) - A | TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 (secp256r1) - A | TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA (secp256r1) - A | TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA (secp256r1) - A | TLS_RSA_WITH_AES_128_GCM_SHA256 (rsa 2048) - A | TLS_RSA_WITH_AES_256_GCM_SHA384 (rsa 2048) - A | TLS_RSA_WITH_AES_128_CBC_SHA (rsa 2048) - A | TLS_RSA_WITH_AES_256_CBC_SHA (rsa 2048) - A | TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA (secp256r1) - C | TLS_RSA_WITH_3DES_EDE_CBC_SHA (rsa 2048) - C | compressors: | NULL | cipher preference: server | warnings: | 64-bit block cipher 3DES vulnerable to SWEET32 attack | TLSv1.3: | ciphers: | TLS_AKE_WITH_AES_128_GCM_SHA256 (ecdh_x25519) - A | TLS_AKE_WITH_AES_256_GCM_SHA384 (ecdh_x25519) - A | TLS_AKE_WITH_CHACHA20_POLY1305_SHA256 (ecdh_x25519) - A | cipher preference: server |_ least strength: C ``` After the fix: ``` ns-mbp-jmd6n:experiment nkhogen$ nmap -sV --script ssl-enum-ciphers -p 9070 10.9.142.135 Starting Nmap 7.94 ( https://nmap.org ) at 2024-09-13 13:28 PDT Nmap scan report for 10.9.142.135 Host is up (0.034s latency). 
PORT STATE SERVICE VERSION 9070/tcp open ssl/http Golang net/http server (Go-IPFS json-rpc or InfluxDB API) | ssl-enum-ciphers: | TLSv1.2: | ciphers: | TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 (secp256r1) - A | TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 (secp256r1) - A | TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 (secp256r1) - A | TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA (secp256r1) - A | TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA (secp256r1) - A | TLS_RSA_WITH_AES_128_GCM_SHA256 (rsa 2048) - A | TLS_RSA_WITH_AES_256_GCM_SHA384 (rsa 2048) - A | TLS_RSA_WITH_AES_128_CBC_SHA (rsa 2048) - A | TLS_RSA_WITH_AES_256_CBC_SHA (rsa 2048) - A | compressors: | NULL | cipher preference: server | TLSv1.3: | ciphers: | TLS_AKE_WITH_AES_128_GCM_SHA256 (ecdh_x25519) - A | TLS_AKE_WITH_AES_256_GCM_SHA384 (ecdh_x25519) - A | TLS_AKE_WITH_CHACHA20_POLY1305_SHA256 (ecdh_x25519) - A | cipher preference: server |_ least strength: A ``` Reviewers: nbhatia, sanketh Reviewed By: nbhatia Subscribers: yugaware Differential Revision: https://phorge.dev.yugabyte.com/D38047 --- managed/node-agent/app/server/rpc.go | 13 ++----------- managed/node-agent/util/certs_util.go | 21 +++++++++++++++++++++ 2 files changed, 23 insertions(+), 11 deletions(-) diff --git a/managed/node-agent/app/server/rpc.go b/managed/node-agent/app/server/rpc.go index 095cad27319e..bd61e8c22680 100644 --- a/managed/node-agent/app/server/rpc.go +++ b/managed/node-agent/app/server/rpc.go @@ -84,11 +84,7 @@ func NewRPCServer( // Create a new listener with TLS for both RPC and metrics server. 
listener = tls.NewListener( listener, - &tls.Config{ - Certificates: tlsConfig.Certificates, - MinVersion: tls.VersionTLS12, - NextProtos: []string{http2.NextProtoTLS, "http/1.1"}, - }, + util.TlsConfig(tlsConfig.Certificates, []string{"http/1.1", http2.NextProtoTLS}), ) } } @@ -161,12 +157,7 @@ func loadTLSConfig() (*tls.Config, error) { if err != nil { return nil, err } - tlsConfig := &tls.Config{ - Certificates: []tls.Certificate{serverCert}, - ClientAuth: tls.NoClientCert, - MinVersion: tls.VersionTLS12, - } - return tlsConfig, nil + return util.TlsConfig([]tls.Certificate{serverCert}, nil), nil } func removeFileIfPresent(filename string) error { diff --git a/managed/node-agent/util/certs_util.go b/managed/node-agent/util/certs_util.go index 92589ca1e3f6..ae271348c02b 100644 --- a/managed/node-agent/util/certs_util.go +++ b/managed/node-agent/util/certs_util.go @@ -5,6 +5,7 @@ package util import ( "context" "crypto" + "crypto/tls" "crypto/x509" "encoding/json" "encoding/pem" @@ -293,3 +294,23 @@ func VerifyJWT(ctx context.Context, config *Config, authToken string) (*jwt.MapC } return nil, fmt.Errorf("Invalid token") } + +// TlsConfig returns the common TLS config to be used. +func TlsConfig(certs []tls.Certificate, nextProtos []string) *tls.Config { + whitelists := []uint16{} + for _, suites := range tls.CipherSuites() { + // Method excludes insecure ones but the check is just added to be more cautious. 
+ if !suites.Insecure { + whitelists = append(whitelists, suites.ID) + } + } + tlsConfig := &tls.Config{ + Certificates: certs, + MinVersion: tls.VersionTLS12, + CipherSuites: whitelists, + } + if len(nextProtos) > 0 { + tlsConfig.NextProtos = nextProtos + } + return tlsConfig +} From d6e81dff393b7e6331211fee051fbf64f1e842b9 Mon Sep 17 00:00:00 2001 From: Shubham Date: Mon, 16 Sep 2024 13:31:53 +0530 Subject: [PATCH 25/75] [PLAT-15325] Pass the region_name during YNP installation Summary: Pass the region_name during YNP installation Test Plan: Provisioned a node using YNP. Reviewers: skhilar Reviewed By: skhilar Subscribers: yugaware Differential Revision: https://phorge.dev.yugabyte.com/D38064 --- .../resources/ynp/modules/provision/node_agent/templates/run.j2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/managed/node-agent/resources/ynp/modules/provision/node_agent/templates/run.j2 b/managed/node-agent/resources/ynp/modules/provision/node_agent/templates/run.j2 index f09ec1f3a517..8cbb6aaa6f6c 100644 --- a/managed/node-agent/resources/ynp/modules/provision/node_agent/templates/run.j2 +++ b/managed/node-agent/resources/ynp/modules/provision/node_agent/templates/run.j2 @@ -196,7 +196,7 @@ installer_dir="{{ ynp_dir }}/../../bin" su - {{ yb_user }} -c "\"$installer_dir/node-agent-installer.sh\" -c install \ -u {{ url }} -t {{ api_key }} --provider_id $provider_id \ --instance_type {{ instance_type_name }} --zone_name {{ provider_region_zone_name }} \ ---node_name {{ node_name }} --node_ip {{ node_external_fqdn }} \ +--node_name {{ node_name }} --region_name {{ provider_region_name }} --node_ip {{ node_external_fqdn }} \ --bind_ip {{ bind_ip }} --silent --skip_verify_cert $airgap_flag" loginctl enable-linger {{ yb_user }} From 2ba818062174f874e0b8ec2bcadc9dd607d122e5 Mon Sep 17 00:00:00 2001 From: Yury Shchetinin Date: Wed, 28 Aug 2024 21:40:09 +0300 Subject: [PATCH 26/75] [PLAT-14883][Azure] OS upgrades seems to be leaving over unattached disks 
Summary: Used the same approach of deleting disks after OS upgrade for azure. Removed duplicate code in VMImageUpgrade. Test Plan: 1) run OS upgrade with the same image and force=true and force it to fail while deleting disk 2) retry that task (without force=true) - verify that old disk is deleted Reviewers: svarshney, muthu, nsingh Reviewed By: muthu Subscribers: yugaware Differential Revision: https://phorge.dev.yugabyte.com/D37680 --- .../devops/opscli/ybops/cloud/azure/cloud.py | 4 + .../devops/opscli/ybops/cloud/azure/method.py | 6 +- .../devops/opscli/ybops/cloud/azure/utils.py | 20 +++++ .../tasks/upgrade/VMImageUpgrade.java | 85 ++++++++++--------- 4 files changed, 69 insertions(+), 46 deletions(-) diff --git a/managed/devops/opscli/ybops/cloud/azure/cloud.py b/managed/devops/opscli/ybops/cloud/azure/cloud.py index 002cd56f4031..9ba3506c1d65 100644 --- a/managed/devops/opscli/ybops/cloud/azure/cloud.py +++ b/managed/devops/opscli/ybops/cloud/azure/cloud.py @@ -237,6 +237,10 @@ def edit_dns_record_set(self, dns_zone_id, domain_name_prefix, ip_list): def delete_dns_record_set(self, dns_zone_id, domain_name_prefix): return self.get_admin().delete_dns_record_set(dns_zone_id, domain_name_prefix) + def delete_volumes(self, args): + tags = json.loads(args.instance_tags) if args.instance_tags is not None else {} + return self.get_admin().delete_disks(tags) + def modify_tags(self, args): instance = self.get_host_info(args) if not instance: diff --git a/managed/devops/opscli/ybops/cloud/azure/method.py b/managed/devops/opscli/ybops/cloud/azure/method.py index 63133753756d..3cf06efed7ff 100644 --- a/managed/devops/opscli/ybops/cloud/azure/method.py +++ b/managed/devops/opscli/ybops/cloud/azure/method.py @@ -131,11 +131,7 @@ def __init__(self, base_command): super(AzureReplaceRootVolumeMethod, self).__init__(base_command) def _mount_root_volume(self, host_info, volume): - curr_root_vol = host_info["root_volume"] self.cloud.mount_disk(host_info, volume) - disk_del = 
self.cloud.get_admin().delete_disk(curr_root_vol) - disk_del.wait() - logging.info("[app] Successfully deleted old OS disk {}".format(curr_root_vol)) def _host_info_with_current_root_volume(self, args, host_info): return (host_info, host_info.get("root_volume")) @@ -312,7 +308,7 @@ def __init__(self, base_command): super(AzureDeleteRootVolumesMethod, self).__init__(base_command) def delete_volumes(self, args): - pass + self.cloud.delete_volumes(args) class AzurePauseInstancesMethod(AbstractInstancesMethod): diff --git a/managed/devops/opscli/ybops/cloud/azure/utils.py b/managed/devops/opscli/ybops/cloud/azure/utils.py index 57a9822658c0..31d7330e604f 100644 --- a/managed/devops/opscli/ybops/cloud/azure/utils.py +++ b/managed/devops/opscli/ybops/cloud/azure/utils.py @@ -546,6 +546,26 @@ def update_os_disk(self, vm_name, os_disk): def delete_disk(self, disk_name): return self.compute_client.disks.begin_delete(RESOURCE_GROUP, os.path.basename(disk_name)) + def delete_disks(self, tags): + if not tags: + raise YBOpsRuntimeError('Tags must be specified') + universe_uuid = tags.get('universe-uuid') + if universe_uuid is None: + raise YBOpsRuntimeError('Universe UUID must be specified') + node_uuid = tags.get('node-uuid') + if node_uuid is None: + raise YBOpsRuntimeError('Node UUID must be specified') + disk_list = self.compute_client.disks.list_by_resource_group(RESOURCE_GROUP) + if disk_list: + for disk in disk_list: + if (disk.disk_state == "Unattached" and disk.tags + and disk.tags.get('universe-uuid') == universe_uuid + and disk.tags.get('node-uuid') == node_uuid): + logging.info("[app] Deleting disk {}".format(disk.name)) + disk_del = self.delete_disk(disk.name) + disk_del.wait() + logging.info("[app] Deleted disk {}".format(disk.name)) + def tag_disks(self, vm, tags): # Updating requires Disk as input rather than OSDisk. Retrieve Disk class with OSDisk name. 
disk = self.compute_client.disks.get( diff --git a/managed/src/main/java/com/yugabyte/yw/commissioner/tasks/upgrade/VMImageUpgrade.java b/managed/src/main/java/com/yugabyte/yw/commissioner/tasks/upgrade/VMImageUpgrade.java index 6fba0048361e..932210035024 100644 --- a/managed/src/main/java/com/yugabyte/yw/commissioner/tasks/upgrade/VMImageUpgrade.java +++ b/managed/src/main/java/com/yugabyte/yw/commissioner/tasks/upgrade/VMImageUpgrade.java @@ -30,10 +30,10 @@ import com.yugabyte.yw.models.helpers.NodeDetails.NodeState; import java.util.ArrayList; import java.util.Arrays; -import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; +import java.util.LinkedHashMap; import java.util.LinkedHashSet; import java.util.List; import java.util.Map; @@ -145,11 +145,26 @@ public void run() { }); } - private void createVMImageUpgradeTasks(Set nodes) { - createRootVolumeCreationTasks(nodes).setSubTaskGroupType(getTaskSubGroupType()); + private static class ImageSettings { + final String machineImage; + final String sshUserOverride; + final Integer sshPortOverride; + final UUID imageBundleUUID; + + private ImageSettings( + String machineImage, + String sshUserOverride, + Integer sshPortOverride, + UUID imageBundleUUID) { + this.machineImage = machineImage; + this.sshUserOverride = sshUserOverride; + this.sshPortOverride = sshPortOverride; + this.imageBundleUUID = imageBundleUUID; + } + } - Map clusterToImageBundleMap = new HashMap<>(); - Universe universe = getUniverse(); + private Map getImageSettingsForNodes(Set nodes) { + Map result = new LinkedHashMap<>(); UUID imageBundleUUID; for (NodeDetails node : nodes) { UUID region = taskParams().nodeToRegion.get(node.nodeUuid); @@ -188,6 +203,25 @@ private void createVMImageUpgradeTasks(Set nodes) { machineImage); continue; } + result.put( + node, new ImageSettings(machineImage, sshUserOverride, sshPortOverride, imageBundleUUID)); + } + return result; + } + + private void 
createVMImageUpgradeTasks(Set nodes) { + Map imageSettingsMap = getImageSettingsForNodes(nodes); + + createRootVolumeCreationTasks(imageSettingsMap).setSubTaskGroupType(getTaskSubGroupType()); + + Map clusterToImageBundleMap = new HashMap<>(); + Universe universe = getUniverse(); + for (NodeDetails node : imageSettingsMap.keySet()) { + ImageSettings imageSettings = imageSettingsMap.get(node); + final UUID imageBundleUUID = imageSettings.imageBundleUUID; + final String sshUserOverride = imageSettings.sshUserOverride; + final Integer sshPortOverride = imageSettings.sshPortOverride; + final String machineImage = imageSettings.machineImage; Set processTypes = new LinkedHashSet<>(); if (node.isMaster) { processTypes.add(ServerType.MASTER); @@ -310,49 +344,18 @@ private void createVMImageUpgradeTasks(Set nodes) { .setSubTaskGroupType(getTaskSubGroupType()); } - private SubTaskGroup createRootVolumeCreationTasks(Collection nodes) { + private SubTaskGroup createRootVolumeCreationTasks(Map settingsMap) { Map> rootVolumesPerAZ = - nodes.stream().collect(Collectors.groupingBy(n -> n.azUuid)); + settingsMap.keySet().stream().collect(Collectors.groupingBy(n -> n.azUuid)); SubTaskGroup subTaskGroup = createSubTaskGroup("CreateRootVolumes"); - Universe universe = getUniverse(); rootVolumesPerAZ.forEach( (key, value) -> { NodeDetails node = value.get(0); - UUID region = taskParams().nodeToRegion.get(node.nodeUuid); - String updatedMachineImage = ""; - if (taskParams().imageBundles != null && taskParams().imageBundles.size() > 0) { - UUID imageBundleUUID = retrieveImageBundleUUID(taskParams().imageBundles, node); - ImageBundle.NodeProperties toOverwriteNodeProperties = - imageBundleUtil.getNodePropertiesOrFail( - imageBundleUUID, node.cloudInfo.region, node.cloudInfo.cloud); - updatedMachineImage = toOverwriteNodeProperties.getMachineImage(); - } else { - // Backward compatiblity. 
- updatedMachineImage = taskParams().machineImages.get(region); - } - final String machineImage = updatedMachineImage; - int numVolumes = value.size(); - - if (!taskParams().forceVMImageUpgrade) { - numVolumes = - (int) - value.stream() - .filter( - n -> { - String existingMachineImage = n.machineImage; - if (StringUtils.isBlank(existingMachineImage)) { - existingMachineImage = retreiveMachineImageForNode(n); - } - return !machineImage.equals(existingMachineImage); - }) - .count(); - } + ImageSettings imageSettings = settingsMap.get(node); - if (numVolumes == 0) { - log.info("Nothing to upgrade in AZ {}", node.cloudInfo.az); - return; - } + final String machineImage = imageSettings.machineImage; + int numVolumes = value.size(); CreateRootVolumes.Params params = new CreateRootVolumes.Params(); Cluster cluster = taskParams().getClusterByUuid(node.placementUuid); From 64aa030676dd0e16a28f801e78123e9d63c20cb4 Mon Sep 17 00:00:00 2001 From: kkannan Date: Thu, 12 Sep 2024 17:22:06 +0530 Subject: [PATCH 27/75] [PLAT-15231]Query Regex not working in logs page - [PLAT-15235] [PLAT-15239][PLAT-15240][PLAT-15241][PLAT-15243] Summary: Fixed multiple bugs related to task details drawer PLAT-15231 - Added Start time to query. PLAT-15235 - Fixed the heading PLAT-15239 - Added the 'Collapse all' Button PLAT-15240 - Added subtaskInfo to the drawer component. These details were not available in old UI. Instead we use the new API called 'details' to fetch the info. PLAT-15241 - Universe Diff was showing incorrect information due to a bug in comparison. We fixed it and also we implemented granular level of comparison. PLAT-15243 - Added the 'Collapse all' Button Test Plan: Tested manually. 
{F284829} {F284830} {F284831} Reviewers: lsangappa Reviewed By: lsangappa Subscribers: yugaware Differential Revision: https://phorge.dev.yugabyte.com/D38005 --- .../tasks/components/TaskDetailBanner.tsx | 2 +- .../tasks/components/TaskDetailDrawer.tsx | 1 + .../tasks/components/TaskDiffModal.tsx | 12 +- .../components/bannerComp/BannerStyles.ts | 2 +- .../bannerComp/TaskFailedBanner.tsx | 1 - .../tasks/components/diffComp/DiffActions.tsx | 17 +- .../tasks/components/diffComp/DiffCard.tsx | 2 +- .../tasks/components/diffComp/DiffUtils.ts | 47 +++++- .../components/diffComp/diffs/GFlagsDiff.tsx | 39 ++--- .../diffComp/diffs/UniverseDiff.tsx | 72 +++++---- .../tasks/components/diffComp/dtos.ts | 14 +- .../components/drawerComp/SubTaskDetails.tsx | 145 ++++++++++++++---- .../drawerComp/TaskDetailActions.tsx | 4 +- .../components/drawerComp/TaskDetailInfo.tsx | 5 +- .../tasks/components/drawerComp/api.ts | 9 +- .../ui/src/redesign/features/tasks/dtos.ts | 25 ++- managed/ui/src/translations/en.json | 7 +- 17 files changed, 295 insertions(+), 109 deletions(-) diff --git a/managed/ui/src/redesign/features/tasks/components/TaskDetailBanner.tsx b/managed/ui/src/redesign/features/tasks/components/TaskDetailBanner.tsx index a9a35b7ac8ae..f427830fd4af 100644 --- a/managed/ui/src/redesign/features/tasks/components/TaskDetailBanner.tsx +++ b/managed/ui/src/redesign/features/tasks/components/TaskDetailBanner.tsx @@ -28,7 +28,7 @@ export const TaskDetailBanner: FC = ({ taskUUID, universe const [taskID, setTaskID] = useSessionStorage(`taskID`, taskUUID); const [showTaskDetailsDrawer, toggleTaskDetailsDrawer] = useSessionStorage( - `show-task-detail`, + `show-task-detail-${taskID}`, false ); diff --git a/managed/ui/src/redesign/features/tasks/components/TaskDetailDrawer.tsx b/managed/ui/src/redesign/features/tasks/components/TaskDetailDrawer.tsx index 47585eb9c9d1..5c02e8c7d969 100644 --- a/managed/ui/src/redesign/features/tasks/components/TaskDetailDrawer.tsx +++ 
b/managed/ui/src/redesign/features/tasks/components/TaskDetailDrawer.tsx @@ -144,6 +144,7 @@ export const TaskDetailDrawer: FC = ({ visible, taskUUID, className: classes.dialogContent }} enableBackdropDismiss + keepMounted >
diff --git a/managed/ui/src/redesign/features/tasks/components/TaskDiffModal.tsx b/managed/ui/src/redesign/features/tasks/components/TaskDiffModal.tsx index b8e26a3c42db..5fb939d8c1ef 100644 --- a/managed/ui/src/redesign/features/tasks/components/TaskDiffModal.tsx +++ b/managed/ui/src/redesign/features/tasks/components/TaskDiffModal.tsx @@ -35,7 +35,6 @@ const TaskDiffModal: React.FC = ({ visible, onClose, current // Differ to be used for the current task. const [differ, setDiffer] = useState | null>(null); - const { data: auditData } = useQuery( ['auditData', currentTask?.id], () => getAuditLog(currentTask!.id), @@ -43,12 +42,13 @@ const TaskDiffModal: React.FC = ({ visible, onClose, current enabled: !!currentTask && visible, select: (data) => data.data, onError: () => { - toast.error(t('diffDetailsNotFound')); - } - }); + toast.error(t('diffDetailsNotFound')); + } + } + ); + + const taskDiffDetails = useMemo(() => mapAuditLogToTaskDiffApiResp(auditData), [auditData]); - const taskDiffDetails = mapAuditLogToTaskDiffApiResp(auditData); - useEffect(() => { if (!currentTask || !visible || !taskDiffDetails) { return; diff --git a/managed/ui/src/redesign/features/tasks/components/bannerComp/BannerStyles.ts b/managed/ui/src/redesign/features/tasks/components/bannerComp/BannerStyles.ts index eb49e19d64b3..c28594dc7c04 100644 --- a/managed/ui/src/redesign/features/tasks/components/bannerComp/BannerStyles.ts +++ b/managed/ui/src/redesign/features/tasks/components/bannerComp/BannerStyles.ts @@ -28,6 +28,6 @@ export const useBannerCommonStyles = makeStyles((theme) => ({ width: '1px', height: '24px', background: theme.palette.ybacolors.ybBorderGray, - border: `1px solid ${theme.palette.ybacolors.ybBorderGray}` + border: `0.5px solid ${theme.palette.ybacolors.ybBorderGray}` } })); diff --git a/managed/ui/src/redesign/features/tasks/components/bannerComp/TaskFailedBanner.tsx b/managed/ui/src/redesign/features/tasks/components/bannerComp/TaskFailedBanner.tsx index 
8c106a753eb0..efc64bfd79e5 100644 --- a/managed/ui/src/redesign/features/tasks/components/bannerComp/TaskFailedBanner.tsx +++ b/managed/ui/src/redesign/features/tasks/components/bannerComp/TaskFailedBanner.tsx @@ -63,7 +63,6 @@ export const TaskFailedBanner: FC = ({ width={130} />
- void; + onExpandAll: (flag: boolean) => void; changesCount: number; } @@ -21,14 +21,23 @@ export const DiffActions: FC = ({ onExpandAll, changesCount }) const { t } = useTranslation('translation', { keyPrefix: 'taskDetails.diffModal' }); + const [expanded, setExpanded] = useState(false); + return ( {changesCount} changes - - {t('expandAll')} + { + setExpanded(!expanded); + onExpandAll(!expanded); + }} + variant="secondary" + data-testid="diff-expand-all" + > + {expanded ? t('collapseAll') : t('expandAll')} diff --git a/managed/ui/src/redesign/features/tasks/components/diffComp/DiffCard.tsx b/managed/ui/src/redesign/features/tasks/components/diffComp/DiffCard.tsx index 108ae40cc6df..02b9900b0c7e 100644 --- a/managed/ui/src/redesign/features/tasks/components/diffComp/DiffCard.tsx +++ b/managed/ui/src/redesign/features/tasks/components/diffComp/DiffCard.tsx @@ -65,7 +65,7 @@ const useStyles = makeStyles((theme) => ({ fontSize: theme.spacing(4) }, strikeOut: { - textDecoration: 'line-through' + // textDecoration: 'line-through' }, strikeOutRed: { '&>span': { diff --git a/managed/ui/src/redesign/features/tasks/components/diffComp/DiffUtils.ts b/managed/ui/src/redesign/features/tasks/components/diffComp/DiffUtils.ts index 94984275661a..3dafc8e4a013 100644 --- a/managed/ui/src/redesign/features/tasks/components/diffComp/DiffUtils.ts +++ b/managed/ui/src/redesign/features/tasks/components/diffComp/DiffUtils.ts @@ -7,7 +7,7 @@ * http://github.com/YugaByte/yugabyte-db/blob/master/licenses/POLYFORM-FREE-TRIAL-LICENSE-1.0.0.txt */ -import { isEqual, pick } from 'lodash'; +import { isEqual, keys, pick } from 'lodash'; import { DiffOperation, GFlagDiff } from './dtos'; import { PlacementAZ } from '../../../universe/universe-form/utils/dto'; @@ -24,5 +24,48 @@ export const isAZEqual = (a: PlacementAZ, b: PlacementAZ) => { // returns the operation for a GFlagDiff object export const getGFlagOperation = (gFlag: GFlagDiff) => { - return gFlag.old === null ? 
DiffOperation.ADDED : gFlag.new === null ? DiffOperation.REMOVED : DiffOperation.CHANGED; + return gFlag.old === null + ? DiffOperation.ADDED + : gFlag.new === null + ? DiffOperation.REMOVED + : DiffOperation.CHANGED; +}; + +export type FieldOperations = { [key in keyof PlacementAZ | string]: DiffOperation | undefined }; + +// checks which fields have changed in a PlacementAZ object +// returns an object with the fields as keys and the operations as values +// isAfterCard is used to determine if we are going to display in before card or after card. +// is before card, we want to stirke through and display the fields in red that are changed. +// but in after card, we want to display the fields in green. +export const getFieldOpertions = ( + beforePlacement?: PlacementAZ, + afterPlacement?: PlacementAZ, + isAfterCard = false +): FieldOperations => { + const fields: FieldOperations = { + name: undefined, + numNodesInAZ: undefined, + isAffinitized: undefined, + replicationFactor: undefined + }; + + if (!beforePlacement) { + return { + ...keys(fields).reduce((acc, key) => ({ ...acc, [key]: DiffOperation.ADDED }), {}) + }; + } + if (!afterPlacement) { + return { + ...keys(fields).reduce((acc, key) => ({ ...acc, [key]: DiffOperation.REMOVED }), {}) + }; + } + keys(fields).forEach((key) => { + if ((beforePlacement as any)[key] !== (afterPlacement as any)[key]) { + fields[key] = isAfterCard ? 
DiffOperation.ADDED : DiffOperation.CHANGED; + } else { + fields[key] = DiffOperation.UNCHANGED; + } + }); + return fields; }; diff --git a/managed/ui/src/redesign/features/tasks/components/diffComp/diffs/GFlagsDiff.tsx b/managed/ui/src/redesign/features/tasks/components/diffComp/diffs/GFlagsDiff.tsx index a02d80b4c6f3..668fe0a537f9 100644 --- a/managed/ui/src/redesign/features/tasks/components/diffComp/diffs/GFlagsDiff.tsx +++ b/managed/ui/src/redesign/features/tasks/components/diffComp/diffs/GFlagsDiff.tsx @@ -37,8 +37,9 @@ export class GFlagsDiff extends BaseDiff { } getDiffComponent(): React.ReactElement { - - const { beforeData }: { beforeData: GFlagsDiffProps } = this.diffProps as { beforeData: GFlagsDiffProps }; + const { beforeData }: { beforeData: GFlagsDiffProps } = this.diffProps as { + beforeData: GFlagsDiffProps; + }; const cards: Record[]> = { masterGFlags: [], @@ -66,30 +67,30 @@ export class GFlagsDiff extends BaseDiff { beforeData?.gflags?.tserver?.forEach((diff) => { // tserverGFlagsDiffs[key].forEach((diff: GFlagDiff) => { - cards.tserverGFlags.push( - this.cardRefs.push({ current: ref })} - attribute={{ - title: diff.name ?? '' - }} - beforeValue={{ - title: (diff.old as unknown) as string - }} - afterValue={{ - title: (diff.new as unknown) as string - }} - operation={getGFlagOperation(diff)} - /> - ); + cards.tserverGFlags.push( + this.cardRefs.push({ current: ref })} + attribute={{ + title: diff.name ?? '' + }} + beforeValue={{ + title: (diff.old as unknown) as string + }} + afterValue={{ + title: (diff.new as unknown) as string + }} + operation={getGFlagOperation(diff)} + /> + ); // }); }); return ( { + onExpandAll={(flag) => { // Expand all the cards. this.cardRefs.forEach((ref) => { - ref?.current?.onExpand(true); + ref?.current?.onExpand(flag); }); }} // Get the count of changes. 
diff --git a/managed/ui/src/redesign/features/tasks/components/diffComp/diffs/UniverseDiff.tsx b/managed/ui/src/redesign/features/tasks/components/diffComp/diffs/UniverseDiff.tsx index a42a45e80cbe..381f0f30eab2 100644 --- a/managed/ui/src/redesign/features/tasks/components/diffComp/diffs/UniverseDiff.tsx +++ b/managed/ui/src/redesign/features/tasks/components/diffComp/diffs/UniverseDiff.tsx @@ -25,7 +25,7 @@ import { DiffActions } from '../DiffActions'; import { DiffTitleBanner, TaskDiffBanner } from '../DiffBanners'; import DiffCard, { DiffCardRef } from '../DiffCard'; import { DiffCardWrapper } from '../DiffCardWrapper'; -import { isAZEqual } from '../DiffUtils'; +import { FieldOperations, getFieldOpertions, isAZEqual } from '../DiffUtils'; import { DiffComponentProps, DiffOperation, DiffProps } from '../dtos'; import { BaseDiff } from './BaseDiff'; @@ -167,7 +167,6 @@ export class UniverseDiff extends BaseDiff { if (!beforeRegion && !afterRegion) { throw new Error('Both before and after regions are null'); } - const atttributeCompList = [Region]; const beforeCompList = [
{beforeRegion ? beforeRegion.name : '---'}
]; const afterCompList = [
{afterRegion ? afterRegion.name : '---'}
]; @@ -205,12 +204,12 @@ export class UniverseDiff extends BaseDiff { const azListAdded = differenceWith( afterRegion.azList, beforeRegion.azList, - (a, b) => a.name === b.name && !isAZEqual(a, b) + (a, b) => a.name === b.name ); const azListRemoved = differenceWith( beforeRegion.azList, afterRegion.azList, - (a, b) => a.name === b.name && !isAZEqual(a, b) + (a, b) => a.name === b.name ); // Create diff cards for the added and removed placement AZs. @@ -292,14 +291,23 @@ export class UniverseDiff extends BaseDiff { if (!beforePlacementAZ && !afterPlacementAZ) { throw new Error('Both before and after placement AZs are null'); } + const fieldOperations = getFieldOpertions(beforePlacementAZ, afterPlacementAZ!); + const afterFieldOperations = getFieldOpertions(beforePlacementAZ, afterPlacementAZ!, true); return [ , { operation={DiffOperation.ADDED} // If the placement AZ is removed(existing), display the empty values. displayEmpty={!afterPlacementAZ} + fieldOperations={afterFieldOperations} /> ]; } @@ -328,9 +337,9 @@ export class UniverseDiff extends BaseDiff { return ( { + onExpandAll={(flag) => { this.cardRefs.forEach((ref) => { - ref?.current?.onExpand(true); + ref?.current?.onExpand(flag); }); }} changesCount={ @@ -393,50 +402,51 @@ const useStylePlacementAzComponent = makeStyles((theme) => ({ fontSize: '13px', '& > span': { width: 'fit-content' - } - }, - strikeOutRed: { - '& > span': { - backgroundColor: '#FEEDED', - mixBlendMode: 'darken' - } - }, - strikeOutGreen: { - '& > span': { + }, + '& > .Changed': { + backgroundColor: '#FDE5E5', + mixBlendMode: 'darken', + textDecoration: 'line-through' + }, + '& > .Added': { backgroundColor: '#CDEFE1', mixBlendMode: 'darken' + }, + '& > .Removed': { + backgroundColor: '#FEEDED', + mixBlendMode: 'darken', + textDecoration: 'line-through' } } })); // Component for displaying the placement AZ. 
+ const PlacementAzComponent = ({ placementAz, displayEmpty, - operation + fieldOperations }: { placementAz: PlacementAZ; operation: DiffOperation; displayEmpty?: boolean; + fieldOperations?: FieldOperations; }) => { const classes = useStylePlacementAzComponent(); return ( -
- {displayEmpty ? '---' : placementAz.name} +
+ + {displayEmpty ? '---' : placementAz.name} +
- {displayEmpty ? '---' : placementAz.numNodesInAZ} + + {displayEmpty ? '---' : placementAz.numNodesInAZ} +
- {displayEmpty ? '---' : placementAz.isAffinitized + ''} + + {displayEmpty ? '---' : placementAz.isAffinitized + ''} +
); diff --git a/managed/ui/src/redesign/features/tasks/components/diffComp/dtos.ts b/managed/ui/src/redesign/features/tasks/components/diffComp/dtos.ts index 75744568ad47..09fde99f783b 100644 --- a/managed/ui/src/redesign/features/tasks/components/diffComp/dtos.ts +++ b/managed/ui/src/redesign/features/tasks/components/diffComp/dtos.ts @@ -13,7 +13,8 @@ import { Task } from '../../dtos'; export enum DiffOperation { ADDED = 'Added', REMOVED = 'Removed', - CHANGED = 'Changed' + CHANGED = 'Changed', + UNCHANGED = 'Unchanged' } export interface DiffProps { @@ -38,13 +39,12 @@ export type GFlagDiff = { old: string | null; new: string | null; default: string; -} +}; export interface GFlagsDiffProps { gflags: { - master: GFlagDiff[], - tserver: GFlagDiff[] - } - + master: GFlagDiff[]; + tserver: GFlagDiff[]; + }; } export interface AuditLogProps { customerUUID: string; @@ -55,4 +55,4 @@ export interface AuditLogProps { action: string; taskUUID: string; auditID: number; -}; +} diff --git a/managed/ui/src/redesign/features/tasks/components/drawerComp/SubTaskDetails.tsx b/managed/ui/src/redesign/features/tasks/components/drawerComp/SubTaskDetails.tsx index e6ca537011b9..f977f84e7dd7 100644 --- a/managed/ui/src/redesign/features/tasks/components/drawerComp/SubTaskDetails.tsx +++ b/managed/ui/src/redesign/features/tasks/components/drawerComp/SubTaskDetails.tsx @@ -12,13 +12,14 @@ import clsx from 'clsx'; import { useToggle } from 'react-use'; import { useQuery } from 'react-query'; import { useTranslation } from 'react-i18next'; +import { groupBy, keys, startCase, values } from 'lodash'; import { Collapse, Typography, makeStyles } from '@material-ui/core'; import { YBButton } from '../../../../components'; import { YBLoadingCircleIcon } from '../../../../../components/common/indicators'; -import { getFailedTaskDetails } from './api'; -import { TaskDetails, TaskStates } from '../../dtos'; +import { getFailedTaskDetails, getSubTaskDetails } from './api'; +import { SubTaskInfo, 
Task, TaskStates } from '../../dtos'; import { TaskDrawerCompProps } from './dtos'; -import { isTaskFailed } from '../../TaskUtils'; +import { isTaskFailed, isTaskRunning } from '../../TaskUtils'; import LinkIcon from '../../../../assets/link.svg'; const useStyles = makeStyles((theme) => ({ @@ -44,7 +45,7 @@ const useStyles = makeStyles((theme) => ({ gap: '8px' }, expandMoreButton: { - flex: '0 0 90px' + flex: '0 0 100px' }, showLog: { fontSize: '12px', @@ -64,6 +65,7 @@ export const SubTaskDetails: FC = ({ currentTask }) => { const classes = useStyles(); const [expandDetails, toggleExpandDetails] = useToggle(false); const failedTask = isTaskFailed(currentTask); + const taskInProgress = isTaskRunning(currentTask); const { t } = useTranslation('translation', { keyPrefix: 'taskDetails.progress' @@ -78,8 +80,36 @@ export const SubTaskDetails: FC = ({ currentTask }) => { } ); + const { data: detailedTaskInfo, isLoading: isSubTaskLoading } = useQuery( + ['subTasks', currentTask.id!], + () => getSubTaskDetails(currentTask.id!), + { + select: (data) => data.data, + enabled: !!currentTask, + refetchInterval: taskInProgress ? 
10000 : false + } + ); + if (!currentTask) return null; + // we have duplicate subtasks in the response, so we are filtering out the latest subtask (by last updated time) + const uniqueTasks: Record = {}; + + detailedTaskInfo?.[currentTask.targetUUID]?.[0].subtaskInfos.forEach((task: SubTaskInfo) => { + const key = task.subTaskGroupType + task.taskType; + if (!uniqueTasks[key]) { + uniqueTasks[key] = task; + } else { + const taskToCompare = uniqueTasks[key]; + if (taskToCompare.updateTime < task.updateTime) { + uniqueTasks[key] = task; + } + } + }); + + //group them by Task Group Type + const subTasksList = groupBy(values(uniqueTasks), 'subTaskGroupType'); + const getFailedTaskData = () => { if (isLoading) return ; return ( @@ -98,7 +128,7 @@ export const SubTaskDetails: FC = ({ currentTask }) => { onClick={() => toggleExpandDetails(!expandDetails)} data-testid="expand-failed-task" > - {t('expand')} + {t(expandDetails ? 'viewLess' : 'expand')} } @@ -114,7 +144,10 @@ export const SubTaskDetails: FC = ({ currentTask }) => {
{ - window.open(`/logs/?queryRegex=${currentTask.correlationId}`, '_blank'); + window.open( + `/logs/?queryRegex=${currentTask.correlationId}&startDate=${currentTask.createTime}`, + '_blank' + ); }} > {t('showLog')} @@ -122,16 +155,26 @@ export const SubTaskDetails: FC = ({ currentTask }) => {
)} - {[...currentTask.details.taskDetails].map((task, index) => ( - - ))} + {isSubTaskLoading ? ( + + ) : ( + keys(subTasksList).map((key, index) => ( + + )) + )}
); }; export type SubTaskCardProps = { - subTask: TaskDetails; + subTasks: SubTaskInfo[]; index: number; + category: string; }; const subTaskCardStyles = makeStyles((theme) => ({ @@ -182,18 +225,31 @@ const subTaskCardStyles = makeStyles((theme) => ({ } } }, - content: { - padding: '8px 10px', - marginLeft: '46px', - marginBottom: '8px', - borderRadius: '8px', + subTaskPanel: { + background: 'rgba(240, 244, 247, 0.50)', + display: 'flex', + flexDirection: 'column', + gap: '24px', + padding: '14px', + marginLeft: '20px', marginTop: '16px', - background: theme.palette.grey[100], + borderRadius: '8px' + }, + content: { + borderRadius: '50%', + display: 'flex', + alignItems: 'center', + flexFlow: 'wrap', + gap: '20px', '&.Success,&.Created': { - background: '#EEDED' + '& i': { + color: theme.palette.success[500] + } }, '&.Failure,&.Aborted,&.Abort': { - background: '#FEEDED' + '& i': { + color: theme.palette.error[500] + } } }, rowCollapsed: { @@ -203,16 +259,27 @@ const subTaskCardStyles = makeStyles((theme) => ({ rowExpanded: { transitionDuration: '0.2s', transform: 'rotate(90deg)' + }, + errMsg: { + width: '100%', + marginLeft: '50px', + borderRadius: '8px', + background: theme.palette.error[100], + padding: '8px 10px', + wordBreak: 'break-word' } })); -export const SubTaskCard: FC = ({ subTask, index }) => { +export const SubTaskCard: FC = ({ subTasks, index, category }) => { const classes = subTaskCardStyles(); const [showDetails, toggleDetails] = useToggle(false); + const { t } = useTranslation('translation', { + keyPrefix: 'taskDetails.progress' + }); - const getTaskIcon = () => { - switch (subTask.state) { + const getTaskIcon = (state: Task['status'], position?: number) => { + switch (state) { case TaskStates.RUNNING: return ; case TaskStates.SUCCESS: @@ -225,10 +292,23 @@ export const SubTaskCard: FC = ({ subTask, index }) => { case TaskStates.INITIALIZING: case TaskStates.UNKNOWN: default: - return index; + return position ?? 
index; } }; + // Get the category status based on the subtasks + // If any of the subtasks is not success, then the category status is the status of the last subtask + // If all the subtasks are success, then the category status is success + let categoryTaskStatus = TaskStates.CREATED; + + for (let i = 0; i < subTasks.length; i++) { + if (subTasks[i].taskState !== TaskStates.SUCCESS) { + categoryTaskStatus = subTasks[i].taskState; + break; + } + categoryTaskStatus = TaskStates.SUCCESS; + } + return (
toggleDetails(!showDetails)}> @@ -239,15 +319,24 @@ export const SubTaskCard: FC = ({ subTask, index }) => { classes.caret )} /> -
- {/* {isTaskRunning ? : index} */} - {getTaskIcon()} +
+ {getTaskIcon(categoryTaskStatus, index)}
- {subTask.title} + {startCase(category)}
-
- {subTask.description} +
+ {subTasks.map((subTask, index) => ( +
+
+ {getTaskIcon(subTask.taskState, index + 1)} +
+ {startCase(subTask.taskType)} + {subTask.details?.error?.message && ( +
{subTask.details?.error?.message}
+ )} +
+ ))}
diff --git a/managed/ui/src/redesign/features/tasks/components/drawerComp/TaskDetailActions.tsx b/managed/ui/src/redesign/features/tasks/components/drawerComp/TaskDetailActions.tsx index 52f94e4c1174..8b7cc144bc00 100644 --- a/managed/ui/src/redesign/features/tasks/components/drawerComp/TaskDetailActions.tsx +++ b/managed/ui/src/redesign/features/tasks/components/drawerComp/TaskDetailActions.tsx @@ -45,8 +45,8 @@ export const TaskDetailActions: FC = ({ currentTask }) => { const dispatch = useDispatch(); // we should refresh the universe info after retrying the task, else the old task banner will be shown - const refreshUniverse = ()=> { - return dispatch(fetchUniverseInfo(currentTask.targetUUID) as any).then((response:any) => { + const refreshUniverse = () => { + return dispatch(fetchUniverseInfo(currentTask.targetUUID) as any).then((response: any) => { return dispatch(fetchUniverseInfoResponse(response.payload)); }); }; diff --git a/managed/ui/src/redesign/features/tasks/components/drawerComp/TaskDetailInfo.tsx b/managed/ui/src/redesign/features/tasks/components/drawerComp/TaskDetailInfo.tsx index f51475d4f8f0..574117109abc 100644 --- a/managed/ui/src/redesign/features/tasks/components/drawerComp/TaskDetailInfo.tsx +++ b/managed/ui/src/redesign/features/tasks/components/drawerComp/TaskDetailInfo.tsx @@ -8,6 +8,7 @@ */ import { FC, Fragment } from 'react'; +import { startCase } from 'lodash'; import { useTranslation } from 'react-i18next'; import { Typography, makeStyles } from '@material-ui/core'; import { ybFormatDate } from '../../../../helpers/DateUtils'; @@ -79,11 +80,11 @@ export const TaskDetailInfo: FC = ({ currentTask }) => { const taskInfo: TaskInfo[] = [ { label: t('type'), - value: currentTask.typeName + value: `${startCase(currentTask.type)} ${currentTask.target}` }, { label: t('target'), - value: currentTask.target + value: currentTask.title.replace(/.*:\s*/, '') }, { label: t('status'), diff --git 
a/managed/ui/src/redesign/features/tasks/components/drawerComp/api.ts b/managed/ui/src/redesign/features/tasks/components/drawerComp/api.ts index b9d6663bfac8..ee643f1b29b0 100644 --- a/managed/ui/src/redesign/features/tasks/components/drawerComp/api.ts +++ b/managed/ui/src/redesign/features/tasks/components/drawerComp/api.ts @@ -9,7 +9,7 @@ import axios from 'axios'; import { ROOT_URL } from '../../../../../config'; -import { FailedTask } from '../../dtos'; +import { FailedTask, SubTaskDetailsResp } from '../../dtos'; /** * Get the details of the failed task. @@ -43,3 +43,10 @@ export const abortTask = (taskUUID: string) => { const cUUID = localStorage.getItem('customerId'); return axios.post(`${ROOT_URL}/customers/${cUUID}/tasks/${taskUUID}/abort`); }; +/** + * fetch subTaskInfos + */ +export const getSubTaskDetails = (taskUUID: string) => { + const cUUID = localStorage.getItem('customerId'); + return axios.get(`${ROOT_URL}/customers/${cUUID}/tasks/${taskUUID}/details`); +}; diff --git a/managed/ui/src/redesign/features/tasks/dtos.ts b/managed/ui/src/redesign/features/tasks/dtos.ts index 005084078a5c..228b31bc1d3f 100644 --- a/managed/ui/src/redesign/features/tasks/dtos.ts +++ b/managed/ui/src/redesign/features/tasks/dtos.ts @@ -60,6 +60,7 @@ export interface Task { retryable: boolean; correlationId: string; userEmail: string; + subtaskInfos: SubTaskInfo[]; } export interface FailedTask { @@ -70,4 +71,26 @@ export interface FailedTask { subTaskType: string; subTaskUUID: string; }[]; -} +}; + +export interface SubTaskInfo { + uuid: string; + parentUuid: Task['id']; + taskType: string; + taskState: TaskStates; + subTaskGroupType: Task['type']; + createTime: number; + updateTime: number; + percentDone: number; + position: number; + details?: { + error?: { + code: string; + message: string; + } + } +}; + +export type SubTaskDetailsResp = { + [key: string]: Task[]; +}; diff --git a/managed/ui/src/translations/en.json b/managed/ui/src/translations/en.json index 
d53eb5acccb8..657cd41b8742 100644 --- a/managed/ui/src/translations/en.json +++ b/managed/ui/src/translations/en.json @@ -2218,7 +2218,7 @@ "info": { "task": "Task", "type": "Type", - "target": "Target", + "target": "Performed On", "status": "Status", "startTime": "Start Time", "endTime": "End time", @@ -2234,7 +2234,9 @@ "title": "Progress", "complete": "{{progress}}% Completed", "expand": "Expand", - "showLog": "YugaWare Log" + "viewLess": "Collapse", + "showLog": "YugaWare Log", + "percentComplete": "{{percent}}% Completed" }, "banner": { "viewDetails": "View Details", @@ -2247,6 +2249,7 @@ "diffModal": { "title": "Universe Changes", "expandAll": "Expand All", + "collapseAll": "Collapse All", "diffDetailsNotFound": "Unable to find diff details", "alerts": { "inProgress": "This operation intends to modify the following {{count}} value(s). ", From e9ab17dea0d3b4f1673531c07404a290f9fbd8f2 Mon Sep 17 00:00:00 2001 From: Abhinab Saha Date: Tue, 10 Sep 2024 18:07:14 +0530 Subject: [PATCH 28/75] [#23835] ASH: Update the field in /rpcz from yql_endpoint_tserver_uuid to top_level_node_id Summary: In D32646 / f9ca64778dc82be392c200cba5bee28c851d6a01, yql_endpoint_tserver_uuid was updated to top_level_node_id in the view yb_active_session_history. However, the term in /rpcz is inconsistent with this, as it's still yql_endpoint_tserver_uuid. This revision replaces all occurrences of yql_endpoint_tserver_uuid with top_level_node_id. 
**Upgrade/Rollback safety:** A field name is changed, it should be safe to upgrade/downgrade Jira: DB-12739 Test Plan: Jenkins Reviewers: amitanand, hbhanawat Reviewed By: amitanand Subscribers: esheng, yql, ybase Differential Revision: https://phorge.dev.yugabyte.com/D37935 --- src/postgres/src/backend/utils/misc/yb_ash.c | 24 ++++++++++---------- src/yb/ash/wait_state-test.cc | 14 ++++++------ src/yb/ash/wait_state.cc | 6 ++--- src/yb/ash/wait_state.h | 24 ++++++++++---------- src/yb/common/common.proto | 2 +- src/yb/server/rpcz-path-handler.cc | 8 +++---- src/yb/tserver/pg_client_service.cc | 2 +- src/yb/yql/cql/cqlserver/cql_service.cc | 2 +- src/yb/yql/pggate/pg_client.cc | 4 ++-- src/yb/yql/pggate/ybc_pg_typedefs.h | 6 ++--- src/yb/yql/pggate/ybc_pggate.cc | 8 +++---- 11 files changed, 50 insertions(+), 50 deletions(-) diff --git a/src/postgres/src/backend/utils/misc/yb_ash.c b/src/postgres/src/backend/utils/misc/yb_ash.c index 38dbc92298a4..782682b97118 100644 --- a/src/postgres/src/backend/utils/misc/yb_ash.c +++ b/src/postgres/src/backend/utils/misc/yb_ash.c @@ -129,7 +129,7 @@ static void yb_ash_ProcessUtility(PlannedStmt *pstmt, const char *queryString, QueryEnvironment *queryEnv, DestReceiver *dest, char *completionTag); -static const unsigned char *get_yql_endpoint_tserver_uuid(); +static const unsigned char *get_top_level_node_id(); static void YbAshMaybeReplaceSample(PGPROC *proc, int num_procs, TimestampTz sample_time, int samples_considered); static void copy_pgproc_sample_fields(PGPROC *proc, int index); @@ -774,7 +774,7 @@ YbAshMain(Datum main_arg) } static const unsigned char * -get_yql_endpoint_tserver_uuid() +get_top_level_node_id() { static const unsigned char *local_tserver_uuid = NULL; if (!local_tserver_uuid && IsYugaByteEnabled()) @@ -852,11 +852,11 @@ copy_non_pgproc_sample_fields(TimestampTz sample_time, int index) { YBCAshSample *cb_sample = &yb_ash->circular_buffer[index]; - /* yql_endpoint_tserver_uuid is constant for all PG samples 
*/ - if (get_yql_endpoint_tserver_uuid()) - memcpy(cb_sample->yql_endpoint_tserver_uuid, - get_yql_endpoint_tserver_uuid(), - sizeof(cb_sample->yql_endpoint_tserver_uuid)); + /* top_level_node_id is constant for all PG samples */ + if (get_top_level_node_id()) + memcpy(cb_sample->top_level_node_id, + get_top_level_node_id(), + sizeof(cb_sample->top_level_node_id)); /* rpc_request_id is 0 for PG samples */ cb_sample->rpc_request_id = 0; @@ -958,7 +958,7 @@ yb_active_session_history(PG_FUNCTION_ARGS) bool nulls[ncols]; int j = 0; pg_uuid_t root_request_id; - pg_uuid_t yql_endpoint_tserver_uuid; + pg_uuid_t top_level_node_id; /* 22 bytes required for ipv4 and 48 for ipv6 (including null character) */ char client_node_ip[48]; @@ -988,8 +988,8 @@ yb_active_session_history(PG_FUNCTION_ARGS) values[j++] = CStringGetTextDatum( pgstat_get_wait_event(sample->encoded_wait_event_code)); - uchar_to_uuid(sample->yql_endpoint_tserver_uuid, &yql_endpoint_tserver_uuid); - values[j++] = UUIDPGetDatum(&yql_endpoint_tserver_uuid); + uchar_to_uuid(sample->top_level_node_id, &top_level_node_id); + values[j++] = UUIDPGetDatum(&top_level_node_id); values[j++] = UInt64GetDatum(metadata->query_id); values[j++] = Int32GetDatum(metadata->pid); @@ -1362,7 +1362,7 @@ FormatAshSampleAsCsv(YBCAshSample *ash_data_buffer, int total_elements_to_dump, pgstat_get_wait_event(sample->encoded_wait_event_code)); /* Top level node id */ - PrintUuidToBuffer(output_buffer, sample->yql_endpoint_tserver_uuid); + PrintUuidToBuffer(output_buffer, sample->top_level_node_id); appendStringInfo(output_buffer, ",%ld,%d,%s,%s,%f,%s,%d\n", (int64) sample->metadata.query_id, sample->metadata.pid, @@ -1372,4 +1372,4 @@ FormatAshSampleAsCsv(YBCAshSample *ash_data_buffer, int total_elements_to_dump, pgstat_get_wait_event_type(sample->encoded_wait_event_code), sample->metadata.database_id); } -} \ No newline at end of file +} diff --git a/src/yb/ash/wait_state-test.cc b/src/yb/ash/wait_state-test.cc index 
427374d628d9..5282b44813d2 100644 --- a/src/yb/ash/wait_state-test.cc +++ b/src/yb/ash/wait_state-test.cc @@ -33,7 +33,7 @@ HostPort RandomHostPort() { AshMetadata GenerateRandomMetadata() { return AshMetadata{ .root_request_id{Uuid::Generate()}, - .yql_endpoint_tserver_uuid{Uuid::Generate()}, + .top_level_node_id{Uuid::Generate()}, .query_id = RandomUniformInt(), .pid = RandomUniformInt(), .database_id = RandomUniformInt(), @@ -46,10 +46,10 @@ void testToAndFromPB() { AshMetadataPB pb; meta1.ToPB(&pb); ASSERT_EQ(pb.root_request_id().size(), kUuidSize); - ASSERT_EQ(pb.yql_endpoint_tserver_uuid().size(), kUuidSize); + ASSERT_EQ(pb.top_level_node_id().size(), kUuidSize); AshMetadata meta2 = AshMetadata::FromPB(pb); ASSERT_EQ(meta1.root_request_id, meta2.root_request_id); - ASSERT_EQ(meta1.yql_endpoint_tserver_uuid, meta2.yql_endpoint_tserver_uuid); + ASSERT_EQ(meta1.top_level_node_id, meta2.top_level_node_id); ASSERT_EQ(meta1.query_id, meta2.query_id); ASSERT_EQ(meta1.pid, meta2.pid); ASSERT_EQ(meta1.database_id, meta2.database_id); @@ -73,7 +73,7 @@ TEST(WaitStateTest, TestUpdate) { HostPortToPB(RandomHostPort(), pb1.mutable_client_host_port()); meta1.UpdateFrom(AshMetadata::FromPB(pb1)); ASSERT_EQ(meta1.root_request_id, pb1_root_request_id); - ASSERT_EQ(meta1.yql_endpoint_tserver_uuid, meta1_copy.yql_endpoint_tserver_uuid); + ASSERT_EQ(meta1.top_level_node_id, meta1_copy.top_level_node_id); ASSERT_EQ(meta1.query_id, pb1.query_id()); ASSERT_EQ(meta1.pid, pb1.pid()); ASSERT_EQ(meta1.database_id, meta1_copy.database_id); @@ -83,12 +83,12 @@ TEST(WaitStateTest, TestUpdate) { meta1 = meta1_copy; // Update 2 other fields, rest unset. 
AshMetadataPB pb2; - auto pb2_yql_endpoint_tserver_uuid = Uuid::Generate(); - pb2_yql_endpoint_tserver_uuid.ToBytes(pb2.mutable_yql_endpoint_tserver_uuid()); + auto pb2_top_level_node_id = Uuid::Generate(); + pb2_top_level_node_id.ToBytes(pb2.mutable_top_level_node_id()); pb2.set_rpc_request_id(RandomUniformInt()); meta1.UpdateFrom(AshMetadata::FromPB(pb2)); ASSERT_EQ(meta1.root_request_id, meta1_copy.root_request_id); - ASSERT_EQ(meta1.yql_endpoint_tserver_uuid, pb2_yql_endpoint_tserver_uuid); + ASSERT_EQ(meta1.top_level_node_id, pb2_top_level_node_id); ASSERT_EQ(meta1.query_id, meta1_copy.query_id); ASSERT_EQ(meta1.pid, meta1_copy.pid); ASSERT_EQ(meta1.rpc_request_id, pb2.rpc_request_id()); diff --git a/src/yb/ash/wait_state.cc b/src/yb/ash/wait_state.cc index 0a5a97e12867..a0fccef77dd0 100644 --- a/src/yb/ash/wait_state.cc +++ b/src/yb/ash/wait_state.cc @@ -253,7 +253,7 @@ void AshMetadata::set_client_host_port(const HostPort &host_port) { std::string AshMetadata::ToString() const { return YB_STRUCT_TO_STRING( - yql_endpoint_tserver_uuid, root_request_id, query_id, database_id, + top_level_node_id, root_request_id, query_id, database_id, rpc_request_id, client_host_port); } @@ -331,9 +331,9 @@ void WaitStateInfo::set_client_host_port(const HostPort &host_port) { metadata_.set_client_host_port(host_port); } -void WaitStateInfo::set_yql_endpoint_tserver_uuid(const Uuid &yql_endpoint_tserver_uuid) { +void WaitStateInfo::set_top_level_node_id(const Uuid &top_level_node_id) { std::lock_guard lock(mutex_); - metadata_.yql_endpoint_tserver_uuid = yql_endpoint_tserver_uuid; + metadata_.top_level_node_id = top_level_node_id; } void WaitStateInfo::UpdateMetadata(const AshMetadata &meta) { diff --git a/src/yb/ash/wait_state.h b/src/yb/ash/wait_state.h index 555cccef9f9c..d1541b105434 100644 --- a/src/yb/ash/wait_state.h +++ b/src/yb/ash/wait_state.h @@ -222,7 +222,7 @@ WaitStateType GetWaitStateType(WaitStateCode code); struct AshMetadata { Uuid root_request_id = 
Uuid::Nil(); - Uuid yql_endpoint_tserver_uuid = Uuid::Nil(); + Uuid top_level_node_id = Uuid::Nil(); uint64_t query_id = 0; pid_t pid = 0; uint32_t database_id = 0; @@ -238,8 +238,8 @@ struct AshMetadata { if (!other.root_request_id.IsNil()) { root_request_id = other.root_request_id; } - if (!other.yql_endpoint_tserver_uuid.IsNil()) { - yql_endpoint_tserver_uuid = other.yql_endpoint_tserver_uuid; + if (!other.top_level_node_id.IsNil()) { + top_level_node_id = other.top_level_node_id; } if (other.query_id != 0) { query_id = other.query_id; @@ -268,10 +268,10 @@ struct AshMetadata { } else { pb->clear_root_request_id(); } - if (!yql_endpoint_tserver_uuid.IsNil()) { - yql_endpoint_tserver_uuid.ToBytes(pb->mutable_yql_endpoint_tserver_uuid()); + if (!top_level_node_id.IsNil()) { + top_level_node_id.ToBytes(pb->mutable_top_level_node_id()); } else { - pb->clear_yql_endpoint_tserver_uuid(); + pb->clear_top_level_node_id(); } if (query_id != 0) { pb->set_query_id(query_id); @@ -315,17 +315,17 @@ struct AshMetadata { root_request_id = *result; } } - Uuid yql_endpoint_tserver_uuid = Uuid::Nil(); - if (pb.has_yql_endpoint_tserver_uuid()) { - Result result = Uuid::FromSlice(pb.yql_endpoint_tserver_uuid()); + Uuid top_level_node_id = Uuid::Nil(); + if (pb.has_top_level_node_id()) { + Result result = Uuid::FromSlice(pb.top_level_node_id()); WARN_NOT_OK(result, "Could not decode uuid from protobuf."); if (result.ok()) { - yql_endpoint_tserver_uuid = *result; + top_level_node_id = *result; } } return AshMetadata{ root_request_id, // root_request_id - yql_endpoint_tserver_uuid, // yql_endpoint_tserver_uuid + top_level_node_id, // top_level_node_id pb.query_id(), // query_id pb.pid(), // pid pb.database_id(), // database_id @@ -368,7 +368,7 @@ class WaitStateInfo { std::atomic& mutable_code(); void set_root_request_id(const Uuid& id) EXCLUDES(mutex_); - void set_yql_endpoint_tserver_uuid(const Uuid& yql_endpoint_tserver_uuid) EXCLUDES(mutex_); + void set_top_level_node_id(const 
Uuid& top_level_node_id) EXCLUDES(mutex_); uint64_t query_id() EXCLUDES(mutex_); void set_query_id(uint64_t query_id) EXCLUDES(mutex_); int64_t rpc_request_id() EXCLUDES(mutex_); diff --git a/src/yb/common/common.proto b/src/yb/common/common.proto index 22aa87ffd575..d7701e51e8db 100644 --- a/src/yb/common/common.proto +++ b/src/yb/common/common.proto @@ -668,7 +668,7 @@ message AshMetadataPB { // The uuid of the local tserver which received the request from the client. // i.e. the local tserver running the YSQL-Pg backend/YCQL proxy. if set, // should be 16 bytes in length. - optional bytes yql_endpoint_tserver_uuid = 3; + optional bytes top_level_node_id = 3; // The request id for the "current" rpc. This may be different than // the root level request id. e.g. consider a write-rpc sent by the local // TServer to the destination TServer. diff --git a/src/yb/server/rpcz-path-handler.cc b/src/yb/server/rpcz-path-handler.cc index ae1c555102c9..15d2323f6eb2 100644 --- a/src/yb/server/rpcz-path-handler.cc +++ b/src/yb/server/rpcz-path-handler.cc @@ -67,7 +67,7 @@ void MakeAshUuidsHumanReadable(rpc::RpcCallInProgressPB* pb) { return; } AshMetadataPB* metadata_pb = pb->mutable_wait_state()->mutable_metadata(); - // Convert root_request_id and yql_endpoint_tserver_uuid from binary to + // Convert root_request_id and top_level_node_id from binary to // human-readable formats if (metadata_pb->has_root_request_id()) { Result result = Uuid::FromSlice(metadata_pb->root_request_id()); @@ -75,10 +75,10 @@ void MakeAshUuidsHumanReadable(rpc::RpcCallInProgressPB* pb) { metadata_pb->set_root_request_id(result.ToString()); } } - if (metadata_pb->has_yql_endpoint_tserver_uuid()) { - Result result = Uuid::FromSlice(metadata_pb->yql_endpoint_tserver_uuid()); + if (metadata_pb->has_top_level_node_id()) { + Result result = Uuid::FromSlice(metadata_pb->top_level_node_id()); if (result.ok()) { - metadata_pb->set_yql_endpoint_tserver_uuid(result.ToString()); + 
metadata_pb->set_top_level_node_id(result.ToString()); } } } diff --git a/src/yb/tserver/pg_client_service.cc b/src/yb/tserver/pg_client_service.cc index d717f4ee98d6..231fb1789700 100644 --- a/src/yb/tserver/pg_client_service.cc +++ b/src/yb/tserver/pg_client_service.cc @@ -1461,7 +1461,7 @@ class PgClientServiceImpl::Impl { continue; } if (local_uuid) { - local_uuid->ToBytes(wait_state_pb.mutable_metadata()->mutable_yql_endpoint_tserver_uuid()); + local_uuid->ToBytes(wait_state_pb.mutable_metadata()->mutable_top_level_node_id()); } MaybeIncludeSample(resp, wait_state_pb, sample_size, samples_considered); } diff --git a/src/yb/yql/cql/cqlserver/cql_service.cc b/src/yb/yql/cql/cqlserver/cql_service.cc index 729097789383..017c931ca1c3 100644 --- a/src/yb/yql/cql/cqlserver/cql_service.cc +++ b/src/yb/yql/cql/cqlserver/cql_service.cc @@ -222,7 +222,7 @@ void CQLServiceImpl::Handle(yb::rpc::InboundCallPtr inbound_call) { ? AF_INET : AF_INET6)}; auto uuid_res = Uuid::FromHexStringBigEndian(server_->instance_pb().permanent_uuid()); if (uuid_res.ok()) { - metadata.yql_endpoint_tserver_uuid = *uuid_res; + metadata.top_level_node_id = *uuid_res; } wait_state->UpdateMetadata(metadata); } diff --git a/src/yb/yql/pggate/pg_client.cc b/src/yb/yql/pggate/pg_client.cc index 813ea0d857f8..56fba06e66d7 100644 --- a/src/yb/yql/pggate/pg_client.cc +++ b/src/yb/yql/pggate/pg_client.cc @@ -159,7 +159,7 @@ void AshMetadataToPB(const YBCPgAshConfig& ash_config, tserver::PgPerformOptions auto* ash_metadata = options->mutable_ash_metadata(); const auto* pg_metadata = ash_config.metadata; - ash_metadata->set_yql_endpoint_tserver_uuid(ash_config.yql_endpoint_tserver_uuid, 16); + ash_metadata->set_top_level_node_id(ash_config.top_level_node_id, 16); ash_metadata->set_root_request_id(pg_metadata->root_request_id, 16); ash_metadata->set_query_id(pg_metadata->query_id); ash_metadata->set_pid(pg_metadata->pid); @@ -401,7 +401,7 @@ class PgClient::Impl : public BigDataFetcher { 
heartbeat_poller_.Start(scheduler, FLAGS_pg_client_heartbeat_interval_ms * 1ms); ash_config_ = *ash_config; - memcpy(ash_config_.yql_endpoint_tserver_uuid, tserver_shared_data_.tserver_uuid(), 16); + memcpy(ash_config_.top_level_node_id, tserver_shared_data_.tserver_uuid(), 16); return Status::OK(); } diff --git a/src/yb/yql/pggate/ybc_pg_typedefs.h b/src/yb/yql/pggate/ybc_pg_typedefs.h index 105fee5fc6fc..68ba8b1abd35 100644 --- a/src/yb/yql/pggate/ybc_pg_typedefs.h +++ b/src/yb/yql/pggate/ybc_pg_typedefs.h @@ -703,7 +703,7 @@ typedef struct PgYCQLStatementStats { // Struct to store ASH samples in the circular buffer. typedef struct AshSample { // Metadata of the sample. - // yql_endpoint_tserver_uuid and rpc_request_id are also part of the metadata, + // top_level_node_id and rpc_request_id are also part of the metadata, // but the reason to not store them inside YBCAshMetadata is that these remain // constant in PG for all the samples of a particular node. So we don't store it // in YBCAshMetadata, which is stored in the procarray to save shared memory. @@ -712,7 +712,7 @@ typedef struct AshSample { // UUID of the TServer where the query generated. // This remains constant for PG samples on a node, but can differ for TServer // samples as TServer can be processing requests from other nodes. - unsigned char yql_endpoint_tserver_uuid[16]; + unsigned char top_level_node_id[16]; // A single query can generate multiple RPCs, this is used to differentiate // those RPCs. 
This will always be 0 for PG samples @@ -737,7 +737,7 @@ typedef struct AshSample { typedef struct PgAshConfig { YBCAshMetadata* metadata; bool* yb_enable_ash; - unsigned char yql_endpoint_tserver_uuid[16]; + unsigned char top_level_node_id[16]; // length of host should be equal to INET6_ADDRSTRLEN char host[46]; } YBCPgAshConfig; diff --git a/src/yb/yql/pggate/ybc_pggate.cc b/src/yb/yql/pggate/ybc_pggate.cc index fbfe48dcaadd..b357248e92f2 100644 --- a/src/yb/yql/pggate/ybc_pggate.cc +++ b/src/yb/yql/pggate/ybc_pggate.cc @@ -1,4 +1,4 @@ -// Copyright (c) YugaByteDB, Inc. +// Copyright (c) YugabyteDB, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at @@ -378,9 +378,9 @@ void AshCopyTServerSample( tserver_metadata.root_request_id().data(), sizeof(cb_metadata->root_request_id)); - std::memcpy(cb_sample->yql_endpoint_tserver_uuid, - tserver_metadata.yql_endpoint_tserver_uuid().data(), - sizeof(cb_sample->yql_endpoint_tserver_uuid)); + std::memcpy(cb_sample->top_level_node_id, + tserver_metadata.top_level_node_id().data(), + sizeof(cb_sample->top_level_node_id)); AshCopyAuxInfo(tserver_sample, component, cb_sample); From c853c56d3201bd2e99e767c7dcffa1e125897bbe Mon Sep 17 00:00:00 2001 From: Dmitry Uspenskiy <47734295+d-uspenskiy@users.noreply.github.com> Date: Mon, 16 Sep 2024 12:52:53 +0300 Subject: [PATCH 29/75] [#23899] YSQL: Support assertThrows in the org.yb.AssertionWrappers Summary: The `assertThrows` method of JUnit helps to improve readability of java unit tests in some cases. 
Original code ``` try { s1.executeBatch(); fail("Internal retries are not supported in batched execution mode"); } catch (BatchUpdateException e) { LOG.info(e.toString()); } ``` Same code with `assertThrows` ``` assertThrows("Internal retries are not supported in batched execution mode", BatchUpdateException.class, () -> s1.executeBatch()); ``` **Note:** JUnit supports `assertThrows` since version 4.13, so the version was updated Jira: DB-12803 Test Plan: Jenkins Reviewers: patnaik.balivada, tfoucher, tnayak Reviewed By: patnaik.balivada Subscribers: yql Tags: #jenkins-ready Differential Revision: https://phorge.dev.yugabyte.com/D38002 --- java/pom.xml | 2 +- .../test/java/org/yb/AssertionWrappers.java | 14 ++++++++++- .../test/java/org/yb/pgsql/TestPgBatch.java | 23 ++++++++----------- 3 files changed, 23 insertions(+), 16 deletions(-) diff --git a/java/pom.xml b/java/pom.xml index 0353c000e273..f1c3f9f6adc2 100644 --- a/java/pom.xml +++ b/java/pom.xml @@ -100,7 +100,7 @@ 3.0.1 junit - 4.12 + 4.13 4.7.0 1.0.0 diff --git a/java/yb-client/src/test/java/org/yb/AssertionWrappers.java b/java/yb-client/src/test/java/org/yb/AssertionWrappers.java index 91bc805d9689..6f603c021c08 100644 --- a/java/yb-client/src/test/java/org/yb/AssertionWrappers.java +++ b/java/yb-client/src/test/java/org/yb/AssertionWrappers.java @@ -1,4 +1,4 @@ -// Copyright (c) YugaByte, Inc. +// Copyright (c) Yugabyte, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except // in compliance with the License. 
You may obtain a copy of the License at @@ -14,6 +14,7 @@ import org.hamcrest.Matcher; import org.junit.Assert; +import org.junit.function.ThrowingRunnable; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -823,4 +824,15 @@ public static void assertLessThan(String message, Comparable a, T b) { public static void assertLessThanOrEqualTo(String message, Comparable a, T b) { wrapAssertion(() -> Assert.assertTrue(message, a.compareTo(b) <= 0)); } + + public static void assertThrows( + String message, Class expectedType, ThrowingRunnable runnable) { + wrapAssertion(() -> LOG.info( + "Expected exception: " + Assert.assertThrows(message, expectedType, runnable).toString())); + } + + public static void assertThrows( + Class expectedType, ThrowingRunnable runnable) { + assertThrows(null, expectedType, runnable); + } } diff --git a/java/yb-pgsql/src/test/java/org/yb/pgsql/TestPgBatch.java b/java/yb-pgsql/src/test/java/org/yb/pgsql/TestPgBatch.java index 8cc6c5cf00d9..d17191fc70d5 100644 --- a/java/yb-pgsql/src/test/java/org/yb/pgsql/TestPgBatch.java +++ b/java/yb-pgsql/src/test/java/org/yb/pgsql/TestPgBatch.java @@ -1,4 +1,4 @@ -// Copyright (c) YugaByte, Inc. +// Copyright (c) YugabyteDB, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except // in compliance with the License. 
You may obtain a copy of the License at @@ -17,8 +17,8 @@ import static org.yb.AssertionWrappers.assertFalse; import static org.yb.AssertionWrappers.assertGreaterThan; import static org.yb.AssertionWrappers.assertNotEquals; +import static org.yb.AssertionWrappers.assertThrows; import static org.yb.AssertionWrappers.assertTrue; -import static org.yb.AssertionWrappers.fail; import com.yugabyte.util.PSQLException; import java.sql.BatchUpdateException; @@ -166,18 +166,13 @@ public void testSchemaMismatchRetry() throws Throwable { // Execute ALTER in a different session c2 so as not to invalidate // the catalog cache of c1 until the next heartbeat with the master. s2.execute("ALTER TABLE t ALTER COLUMN v SET NOT NULL"); - try { - // This uses the cached catalog version but the schema is changed - // by the ALTER TABLE statement above. This should cause a schema - // mismatch error. The schema mismatch error is not retried internally - // in batched execution mode. - s1.executeBatch(); - // Should not reach here since we do not support retries in batched - // execution mode for schema mismatch errors. - fail("Internal retries are not supported in batched execution mode"); - } catch (BatchUpdateException e) { - LOG.info(e.toString()); - } + + // The s1 statement uses the cached catalog version but the schema is changed by the + // ALTER TABLE statement above. The s1 statement execution should cause the schema mismatch + // error. The schema mismatch error is not retried internally in batched execution mode. + assertThrows( + "Internal retries are not supported in batched execution mode", + BatchUpdateException.class, () -> s1.executeBatch()); } } From fdf8a67114101776fd1f21c10a30e3436e4dd914 Mon Sep 17 00:00:00 2001 From: Shubham Date: Mon, 16 Sep 2024 18:19:35 +0530 Subject: [PATCH 30/75] [PLAT-15295] Use Pagination for querying LDAP server Summary: Use pagincation for querying LDAP server. 
Also, introduces API param to sync only specific groups to DB Test Plan: Tested LDAP sync. Also, amex have confirmed the functionality on custom build Reviewers: amalyshev, alan, nbhatia, anijhawan, #yba-api-review!, sneelakantan Reviewed By: amalyshev Subscribers: yugaware Differential Revision: https://phorge.dev.yugabyte.com/D38056 --- .../subtasks/ldapsync/QueryLdapServer.java | 142 +++++++++++------- .../yw/common/config/GlobalConfKeys.java | 8 + .../yw/forms/LdapUnivSyncFormData.java | 17 ++- managed/src/main/resources/reference.conf | 1 + .../src/main/resources/swagger-strict.json | 13 +- managed/src/main/resources/swagger.json | 13 +- 6 files changed, 124 insertions(+), 70 deletions(-) diff --git a/managed/src/main/java/com/yugabyte/yw/commissioner/tasks/subtasks/ldapsync/QueryLdapServer.java b/managed/src/main/java/com/yugabyte/yw/commissioner/tasks/subtasks/ldapsync/QueryLdapServer.java index 11d9eefd438d..a7f671d91498 100644 --- a/managed/src/main/java/com/yugabyte/yw/commissioner/tasks/subtasks/ldapsync/QueryLdapServer.java +++ b/managed/src/main/java/com/yugabyte/yw/commissioner/tasks/subtasks/ldapsync/QueryLdapServer.java @@ -14,13 +14,17 @@ import javax.inject.Inject; import lombok.extern.slf4j.Slf4j; import org.apache.commons.lang3.StringUtils; -import org.apache.directory.api.ldap.model.cursor.CursorException; -import org.apache.directory.api.ldap.model.cursor.EntryCursor; +import org.apache.directory.api.ldap.model.cursor.SearchCursor; import org.apache.directory.api.ldap.model.entry.Attribute; import org.apache.directory.api.ldap.model.entry.Entry; import org.apache.directory.api.ldap.model.entry.Value; -import org.apache.directory.api.ldap.model.exception.LdapException; +import org.apache.directory.api.ldap.model.message.SearchRequest; +import org.apache.directory.api.ldap.model.message.SearchRequestImpl; +import org.apache.directory.api.ldap.model.message.SearchResultDone; import org.apache.directory.api.ldap.model.message.SearchScope; +import 
org.apache.directory.api.ldap.model.message.controls.PagedResults; +import org.apache.directory.api.ldap.model.message.controls.PagedResultsImpl; +import org.apache.directory.api.ldap.model.name.Dn; import org.apache.directory.ldap.client.api.LdapNetworkConnection; @Slf4j @@ -52,74 +56,100 @@ private String retrieveValueFromDN(String dn, String attribute) { // query the LDAP server, extract user and group data, and organize it into a user-to-group // mapping. private void queryLdap(LdapNetworkConnection connection, boolean enabledDetailedLogs) - throws LdapException, CursorException { + throws Exception { LdapUnivSyncFormData ldapUnivSyncFormData = taskParams().ldapUnivSyncFormData; - EntryCursor cursor = - connection.search( - ldapUnivSyncFormData.getLdapBasedn(), - ldapUnivSyncFormData.getLdapSearchFilter(), - SearchScope.SUBTREE, - "*"); - - while (cursor.next()) { - Entry entry = cursor.get(); - if (enabledDetailedLogs) { - log.debug("LDAP user entry retrieved: {}", entry.toString()); - } + byte[] cookie = null; + Integer ldapQueryPageSize = confGetter.getGlobalConf(GlobalConfKeys.ldapPageQuerySize); + + do { + // Setup the paged results control + PagedResults pagedResultsControl = new PagedResultsImpl(); + pagedResultsControl.setSize(ldapQueryPageSize); // Adjust page size as needed + pagedResultsControl.setCookie(cookie); + + SearchRequest searchRequest = new SearchRequestImpl(); + searchRequest.setBase(new Dn(ldapUnivSyncFormData.getLdapBasedn())); + searchRequest.setFilter(ldapUnivSyncFormData.getLdapSearchFilter()); + searchRequest.setScope(SearchScope.SUBTREE); + searchRequest.addAttributes("*", "+"); + searchRequest.addControl(pagedResultsControl); + + // Execute the search + SearchCursor cursor = connection.search(searchRequest); + + while (cursor.next()) { + // Retrieve the entry from the cursor + Entry entry = cursor.getEntry(); - // search for the userfield in the DN - String dn = entry.getDn().toString(); - String userKey = retrieveValueFromDN(dn, 
ldapUnivSyncFormData.getLdapUserfield()); - if (StringUtils.isEmpty(userKey)) { if (enabledDetailedLogs) { - log.debug( - "User dn {} does not contain {}(userfield). Fetching user attributes...", - dn, - ldapUnivSyncFormData.getLdapUserfield()); + log.debug("LDAP user entry retrieved: {}", entry.toString()); } - // if userfield is not found in the DN, search in the rest of the attributes - ArrayList userAttributes = new ArrayList<>(entry.getAttributes()); - if (userAttributes != null) { - for (Attribute ae : userAttributes) { - if (ae.getId().trim().equalsIgnoreCase(ldapUnivSyncFormData.getLdapUserfield())) { - userKey = ae.getString(); + // Process the entry's DN and attributes + String dn = entry.getDn().toString(); + String userKey = retrieveValueFromDN(dn, ldapUnivSyncFormData.getLdapUserfield()); + + if (StringUtils.isEmpty(userKey)) { + ArrayList userAttributes = new ArrayList<>(entry.getAttributes()); + if (enabledDetailedLogs) { + log.debug("Number of attributes retrieved: " + userAttributes.size()); + log.debug( + "User dn {} does not contain {}(userfield). Fetching user attributes...", + dn, + ldapUnivSyncFormData.getLdapUserfield()); + } + + // If userKey not found in DN, search in the attributes + for (Attribute attribute : userAttributes) { + if (attribute.getId().equalsIgnoreCase(ldapUnivSyncFormData.getLdapUserfield())) { + userKey = attribute.getString(); if (enabledDetailedLogs) { - log.debug("User name: {} retrieved from user attribute: {}", userKey, ae.getId()); + log.debug("Iterating attribute here: " + attribute); + log.debug( + "User name: {} retrieved from user attribute: {}", userKey, attribute.getId()); } } } - // Clear the list to remove all elements - userAttributes.clear(); - // Set the reference to null to allow for garbage collection - userAttributes = null; } - } - if (enabledDetailedLogs && StringUtils.isEmpty(userKey)) { - log.warn( - "User {} does not contain '{}'(userfield). 
Skipping the user from the sync...", - dn, - ldapUnivSyncFormData.getLdapUserfield()); - } - if (!StringUtils.isEmpty(userKey)) { - Attribute groups = entry.get(ldapUnivSyncFormData.getLdapGroupMemberOfAttribute()); - List groupKeys = new ArrayList<>(); - if (groups != null) { - for (Value group : groups) { - String groupKey = - retrieveValueFromDN(group.getString(), ldapUnivSyncFormData.getLdapGroupfield()); - groupKeys.add(groupKey); - - if (!taskParams().ldapGroups.contains(groupKey)) { - // If not present, add it to the list - taskParams().ldapGroups.add(groupKey); + if (enabledDetailedLogs && StringUtils.isEmpty(userKey)) { + log.warn( + "User {} does not contain '{}'(userfield). Skipping the user from the sync...", + dn, + ldapUnivSyncFormData.getLdapUserfield()); + } + + // Process groups + if (!StringUtils.isEmpty(userKey)) { + Attribute groups = entry.get(ldapUnivSyncFormData.getLdapGroupMemberOfAttribute()); + List groupKeys = new ArrayList<>(); + if (groups != null) { + for (Value group : groups) { + String groupKey = + retrieveValueFromDN(group.getString(), ldapUnivSyncFormData.getLdapGroupfield()); + if (ldapUnivSyncFormData.getGroupsToSync().size() == 0 + || ldapUnivSyncFormData.getGroupsToSync().contains(groupKey)) { + groupKeys.add(groupKey); + + if (!taskParams().ldapGroups.contains(groupKey)) { + taskParams().ldapGroups.add(groupKey); + } + } } } + taskParams().userToGroup.put(userKey, groupKeys); } - taskParams().userToGroup.put(userKey, groupKeys); } - } + + // Retrieve the PagedResultsControl from the SearchResultDone + SearchResultDone searchResultDone = cursor.getSearchResultDone(); + PagedResultsImpl responseControl = + (PagedResultsImpl) searchResultDone.getControl(PagedResultsImpl.OID); + cookie = (responseControl != null) ? 
responseControl.getCookie() : null; + + cursor.close(); + + } while (cookie != null && cookie.length > 0); // Continue pagination if cookie exists } @Override diff --git a/managed/src/main/java/com/yugabyte/yw/common/config/GlobalConfKeys.java b/managed/src/main/java/com/yugabyte/yw/common/config/GlobalConfKeys.java index 8ce362bb5add..c49d269ac2a7 100644 --- a/managed/src/main/java/com/yugabyte/yw/common/config/GlobalConfKeys.java +++ b/managed/src/main/java/com/yugabyte/yw/common/config/GlobalConfKeys.java @@ -647,6 +647,14 @@ public class GlobalConfKeys extends RuntimeConfigKeysModule { "Enforce server certificate verification for LDAPs/LDAP-TLS", ConfDataType.BooleanType, ImmutableList.of(ConfKeyTags.PUBLIC)); + public static ConfKeyInfo ldapPageQuerySize = + new ConfKeyInfo<>( + "yb.security.ldap.page_query_size", + ScopeType.GLOBAL, + "Pagination query size for LDAP server", + "Pagination query size for LDAP server", + ConfDataType.IntegerType, + ImmutableList.of(ConfKeyTags.PUBLIC)); public static ConfKeyInfo enableDetailedLogs = new ConfKeyInfo<>( "yb.security.enable_detailed_logs", diff --git a/managed/src/main/java/com/yugabyte/yw/forms/LdapUnivSyncFormData.java b/managed/src/main/java/com/yugabyte/yw/forms/LdapUnivSyncFormData.java index 7d0dceee4392..0e7a23f18cbb 100644 --- a/managed/src/main/java/com/yugabyte/yw/forms/LdapUnivSyncFormData.java +++ b/managed/src/main/java/com/yugabyte/yw/forms/LdapUnivSyncFormData.java @@ -45,13 +45,8 @@ public enum TargetApi { private String ldapBindPassword; @ApiModelProperty( - value = - "LDAP search filter to get the user entries. 
This filter can also be used to search for" - + " the users based on their group memberships.", - example = - "(objectclass=person)," - + " (&(objectclass=person)(|(groupName=CN=group1,CN=Groups,DC=example,DC=com)" - + "(groupName=CN=group2,CN=Groups,DC=example,DC=com)))") + value = "LDAP search filter to get the user entries", + example = "(objectclass=person)") private String ldapSearchFilter; @ApiModelProperty(value = "Dn of the search starting point.", example = "dc=example,dc=org") @@ -68,7 +63,7 @@ public enum TargetApi { private String ldapUserfield; @ApiModelProperty( - value = "Group dn field to get the group's name from", + value = "LDAP field to get the group information", required = true, example = "cn") @Constraints.Required() @@ -86,6 +81,12 @@ public enum TargetApi { @ApiModelProperty(value = "TLS versions for LDAPS : TLSv1, TLSv1_1, TLSv1_2") private TlsProtocol ldapTlsProtocol = TlsProtocol.TLSv1_2; + @ApiModelProperty( + value = + "LDAP groups to sync. In case user belongs to multiple groups &" + + " we don't want to sync all of them to DB") + private List groupsToSync = new ArrayList<>(); + public String getDbUser() { if (this.dbUser.isEmpty() && this.targetApi.equals(TargetApi.ycql)) { return Util.DEFAULT_YCQL_USERNAME; diff --git a/managed/src/main/resources/reference.conf b/managed/src/main/resources/reference.conf index 22901ec0d896..ac8a11e1c822 100644 --- a/managed/src/main/resources/reference.conf +++ b/managed/src/main/resources/reference.conf @@ -942,6 +942,7 @@ yb { ldap_tls_protocol="TLSv1_2" enforce_server_cert_verification = true ldap_universe_sync = "false" + page_query_size = 2 } forbidden_ips="169.254.169.254" custom_hooks { diff --git a/managed/src/main/resources/swagger-strict.json b/managed/src/main/resources/swagger-strict.json index 221dac3ea0ca..f2fecde253b1 100644 --- a/managed/src/main/resources/swagger-strict.json +++ b/managed/src/main/resources/swagger-strict.json @@ -6951,6 +6951,13 @@ }, "type" : "array" }, + 
"groupsToSync" : { + "description" : "LDAP groups to sync. In case user belongs to multiple groups & we don't want to sync all of them to DB", + "items" : { + "type" : "string" + }, + "type" : "array" + }, "ldapBasedn" : { "description" : "Dn of the search starting point.", "example" : "dc=example,dc=org", @@ -6970,7 +6977,7 @@ "type" : "string" }, "ldapGroupfield" : { - "description" : "Group dn field to get the group's name from", + "description" : "LDAP field to get the group information", "example" : "cn", "type" : "string" }, @@ -6980,8 +6987,8 @@ "type" : "integer" }, "ldapSearchFilter" : { - "description" : "LDAP search filter to get the user entries. This filter can also be used to search for the users based on their group memberships.", - "example" : "(objectclass=person), (&(objectclass=person)(|(groupName=CN=group1,CN=Groups,DC=example,DC=com)(groupName=CN=group2,CN=Groups,DC=example,DC=com)))", + "description" : "LDAP search filter to get the user entries", + "example" : "(objectclass=person)", "type" : "string" }, "ldapServer" : { diff --git a/managed/src/main/resources/swagger.json b/managed/src/main/resources/swagger.json index 2ec9640381ee..15c20fd2fac1 100644 --- a/managed/src/main/resources/swagger.json +++ b/managed/src/main/resources/swagger.json @@ -6998,6 +6998,13 @@ }, "type" : "array" }, + "groupsToSync" : { + "description" : "LDAP groups to sync. 
In case user belongs to multiple groups & we don't want to sync all of them to DB", + "items" : { + "type" : "string" + }, + "type" : "array" + }, "ldapBasedn" : { "description" : "Dn of the search starting point.", "example" : "dc=example,dc=org", @@ -7017,7 +7024,7 @@ "type" : "string" }, "ldapGroupfield" : { - "description" : "Group dn field to get the group's name from", + "description" : "LDAP field to get the group information", "example" : "cn", "type" : "string" }, @@ -7027,8 +7034,8 @@ "type" : "integer" }, "ldapSearchFilter" : { - "description" : "LDAP search filter to get the user entries. This filter can also be used to search for the users based on their group memberships.", - "example" : "(objectclass=person), (&(objectclass=person)(|(groupName=CN=group1,CN=Groups,DC=example,DC=com)(groupName=CN=group2,CN=Groups,DC=example,DC=com)))", + "description" : "LDAP search filter to get the user entries", + "example" : "(objectclass=person)", "type" : "string" }, "ldapServer" : { From 59fa2cdcf05e8cc43bbe235b89a4665f150b6827 Mon Sep 17 00:00:00 2001 From: Shubham Date: Mon, 16 Sep 2024 18:48:26 +0530 Subject: [PATCH 31/75] [PLAT-15295] Use Pagination for querying LDAP server Summary: Fixes the swagger for runtime config field Test Plan: na Reviewers: asharma, vbansal, dkumar Reviewed By: dkumar Subscribers: yugaware Differential Revision: https://phorge.dev.yugabyte.com/D38074 --- managed/RUNTIME-FLAGS.md | 1 + 1 file changed, 1 insertion(+) diff --git a/managed/RUNTIME-FLAGS.md b/managed/RUNTIME-FLAGS.md index 0e0d92f80223..fa8a092dc58c 100644 --- a/managed/RUNTIME-FLAGS.md +++ b/managed/RUNTIME-FLAGS.md @@ -89,6 +89,7 @@ | "Max Size of each log message" | "yb.logs.max_msg_size" | "GLOBAL" | "We limit the length of each log line as sometimes we dump entire output of script. 
If you want to debug something specific and the script output isgetting truncated in application log then increase this limit" | "Bytes" | | "KMS Refresh Interval" | "yb.kms.refresh_interval" | "GLOBAL" | "Default refresh interval for the KMS providers." | "Duration" | | "Server certificate verification for LDAPs/LDAP-TLS" | "yb.security.ldap.enforce_server_cert_verification" | "GLOBAL" | "Enforce server certificate verification for LDAPs/LDAP-TLS" | "Boolean" | +| "Pagination query size for LDAP server" | "yb.security.ldap.page_query_size" | "GLOBAL" | "Pagination query size for LDAP server" | "Integer" | | "Enable Detailed Logs" | "yb.security.enable_detailed_logs" | "GLOBAL" | "Enable detailed security logs" | "Boolean" | | "Maximum Volume Count" | "yb.max_volume_count" | "GLOBAL" | "Maximum Volume Count" | "Integer" | | "Task Garbage Collector Check Interval" | "yb.taskGC.gc_check_interval" | "GLOBAL" | "How frequently do we check for completed tasks in database" | "Duration" | From af83ee58cbb0bf9091bae89f2c4ea7ba90644179 Mon Sep 17 00:00:00 2001 From: Deepti-yb Date: Fri, 13 Sep 2024 04:15:56 +0000 Subject: [PATCH 32/75] [PLAT-15261][Master]Backups created through the Scheduled Backup feature are incorrectly tagged under the login user Summary: Backups created through the Scheduled Backup feature are incorrectly tagged under the login user (e.g., demo@yugabyte.com) instead of the scheduled backup name. This is because we first check for the Context user instead of the schedule name passed as a param while creating the `CustomerTask` entry. 
Switched the order of preference in this diff Test Plan: - Create a schedule in a universe - Verified user in the task page is the schedule name {F285820} - Logged out and waited for the backup task to still have schedule name Reviewers: vkumar Reviewed By: vkumar Subscribers: yugaware Differential Revision: https://phorge.dev.yugabyte.com/D38029 --- .../com/yugabyte/yw/models/CustomerTask.java | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/managed/src/main/java/com/yugabyte/yw/models/CustomerTask.java b/managed/src/main/java/com/yugabyte/yw/models/CustomerTask.java index 90a792f0b37c..931f0b6252d0 100644 --- a/managed/src/main/java/com/yugabyte/yw/models/CustomerTask.java +++ b/managed/src/main/java/com/yugabyte/yw/models/CustomerTask.java @@ -722,21 +722,20 @@ public static CustomerTask create( th.targetName = targetName; th.createTime = new Date(); th.customTypeName = customTypeName; - String emailFromContext = Util.maybeGetEmailFromContext(); - if (emailFromContext.equals("Unknown")) { - // When task is not created as a part of user action get email of the scheduler. 
- String emailFromSchedule = maybeGetEmailFromSchedule(); - if (emailFromSchedule.equals("Unknown")) { - if (!StringUtils.isEmpty(userEmail)) { - th.userEmail = userEmail; - } else { + if (StringUtils.isEmpty(userEmail)) { + String emailFromContext = Util.maybeGetEmailFromContext(); + if (emailFromContext.equals("Unknown")) { + String emailFromSchedule = maybeGetEmailFromSchedule(); + if (emailFromSchedule.equals("Unknown")) { th.userEmail = "Unknown"; + } else { + th.userEmail = emailFromSchedule; } } else { - th.userEmail = emailFromSchedule; + th.userEmail = emailFromContext; } } else { - th.userEmail = emailFromContext; + th.userEmail = userEmail; } String correlationId = (String) MDC.get(LogUtil.CORRELATION_ID); if (!Strings.isNullOrEmpty(correlationId)) th.correlationId = correlationId; From 55b4187d6069e8bac08bb9e761b724ed1e867184 Mon Sep 17 00:00:00 2001 From: Deepti-yb Date: Mon, 16 Sep 2024 08:20:07 +0000 Subject: [PATCH 33/75] [PLAT-15320][YBA CLI]K8s Universes throw `interface {} is string, not []float64` error Summary: After the introduction of template files for configuration, the float slices conversion started throwing an error. This is because viper does not contain a function to read float slices, and we had to rely on viper.Get() to fetch the values. Despite the return type being interface, the actual type of the Get() return is string, which is what was causing the current issue. Introduced a function that would explicitly convert the string read by `viper.Get()` to a float slice in this diff. 
Test Plan: Tested out k8s universe creation using YBA CLI Reviewers: sneelakantan Reviewed By: sneelakantan Subscribers: yugaware Differential Revision: https://phorge.dev.yugabyte.com/D38065 --- .../yba-cli/cmd/universe/build_universe.go | 65 +++++++++++++------ .../yba-cli/cmd/universe/create_universe.go | 4 +- managed/yba-cli/cmd/universe/list_universe.go | 4 +- managed/yba-cli/cmd/util/constants.go | 6 ++ managed/yba-cli/cmd/util/util.go | 18 +++++ 5 files changed, 74 insertions(+), 23 deletions(-) diff --git a/managed/yba-cli/cmd/universe/build_universe.go b/managed/yba-cli/cmd/universe/build_universe.go index f283cf726f08..1db98d97497d 100644 --- a/managed/yba-cli/cmd/universe/build_universe.go +++ b/managed/yba-cli/cmd/universe/build_universe.go @@ -10,7 +10,6 @@ import ( "strings" "github.com/sirupsen/logrus" - "github.com/spf13/cobra" ybaclient "github.com/yugabyte/platform-go-client" "github.com/yugabyte/yugabyte-db/managed/yba-cli/cmd/releases" "github.com/yugabyte/yugabyte-db/managed/yba-cli/cmd/universe/upgrade" @@ -20,8 +19,9 @@ import ( ) var checkInterfaceType []interface{} +var checkStringType string -func buildCommunicationPorts(cmd *cobra.Command) *ybaclient.CommunicationPorts { +func buildCommunicationPorts() *ybaclient.CommunicationPorts { masterHTTPPort := v1.GetInt("master-http-port") masterRPCPort := v1.GetInt("master-rpc-port") nodeExporterPort := v1.GetInt("node-exporter-port") @@ -50,7 +50,6 @@ func buildCommunicationPorts(cmd *cobra.Command) *ybaclient.CommunicationPorts { } func buildClusters( - cmd *cobra.Command, authAPI *ybaAuthClient.AuthAPIClient, universeName string, ) ( @@ -107,7 +106,7 @@ func buildClusters( } imageBundlesInProvider := providerUsed.GetImageBundles() - if len(imageBundlesInProvider) == 0 { + if len(imageBundlesInProvider) == 0 && util.IsCloudBasedProvider(providerType) { return nil, fmt.Errorf("no image bundles found for provider %s", providerName) } @@ -139,7 +138,7 @@ func buildClusters( } if 
len(imageBundleUUIDs) != len(linuxVersionsInput) { - fmt.Errorf("the provided linux version name cannot be found") + return nil, fmt.Errorf("the provided linux version name cannot be found") } imageBundleLen := len(imageBundleUUIDs) @@ -165,6 +164,12 @@ func buildClusters( k8sTserverMemSizeInterface := v1.Get("k8s-tserver-mem-size") if reflect.TypeOf(k8sTserverMemSizeInterface) == reflect.TypeOf(checkInterfaceType) { k8sTserverMemSize = *util.Float64Slice(k8sTserverMemSizeInterface.([]interface{})) + } else if reflect.TypeOf(k8sTserverMemSizeInterface) == reflect.TypeOf(checkStringType) { + k8sTserverMemSize, err = util.GetFloat64SliceFromString( + k8sTserverMemSizeInterface.(string)) + if err != nil { + logrus.Fatalf(formatter.Colorize(err.Error()+"\n", formatter.RedColor)) + } } else { k8sTserverMemSize = k8sTserverMemSizeInterface.([]float64) } @@ -172,6 +177,12 @@ func buildClusters( k8sTserverCPUCoreCountInterface := v1.Get("k8s-tserver-cpu-core-count") if reflect.TypeOf(k8sTserverCPUCoreCountInterface) == reflect.TypeOf(checkInterfaceType) { k8sTserverCPUCoreCount = *util.Float64Slice(k8sTserverCPUCoreCountInterface.([]interface{})) + } else if reflect.TypeOf(k8sTserverCPUCoreCountInterface) == reflect.TypeOf(checkStringType) { + k8sTserverCPUCoreCount, err = util.GetFloat64SliceFromString( + k8sTserverCPUCoreCountInterface.(string)) + if err != nil { + logrus.Fatalf(formatter.Colorize(err.Error()+"\n", formatter.RedColor)) + } } else { k8sTserverCPUCoreCount = k8sTserverCPUCoreCountInterface.([]float64) } @@ -179,6 +190,12 @@ func buildClusters( k8sMasterMemSizeInterface := v1.Get("k8s-master-mem-size") if reflect.TypeOf(k8sMasterMemSizeInterface) == reflect.TypeOf(checkInterfaceType) { k8sMasterMemSize = *util.Float64Slice(k8sMasterMemSizeInterface.([]interface{})) + } else if reflect.TypeOf(k8sMasterMemSizeInterface) == reflect.TypeOf(checkStringType) { + k8sMasterMemSize, err = util.GetFloat64SliceFromString( + k8sMasterMemSizeInterface.(string)) + if err 
!= nil { + logrus.Fatalf(formatter.Colorize(err.Error()+"\n", formatter.RedColor)) + } } else { k8sMasterMemSize = k8sMasterMemSizeInterface.([]float64) } @@ -186,6 +203,12 @@ func buildClusters( k8sMasterCPUCoreCountInterface := v1.Get("k8s-master-cpu-core-count") if reflect.TypeOf(k8sMasterCPUCoreCountInterface) == reflect.TypeOf(checkInterfaceType) { k8sMasterCPUCoreCount = *util.Float64Slice(k8sMasterCPUCoreCountInterface.([]interface{})) + } else if reflect.TypeOf(k8sMasterCPUCoreCountInterface) == reflect.TypeOf(checkStringType) { + k8sMasterCPUCoreCount, err = util.GetFloat64SliceFromString( + k8sMasterCPUCoreCountInterface.(string)) + if err != nil { + logrus.Fatalf(formatter.Colorize(err.Error()+"\n", formatter.RedColor)) + } } else { k8sMasterCPUCoreCount = k8sMasterCPUCoreCountInterface.([]float64) } @@ -204,7 +227,10 @@ func buildClusters( if len(masterInstanceTypeList) > 0 { masterInstanceType = masterInstanceTypeList[0] } - masterDeviceInfo, err = buildMasterDeviceInfo(cmd, providerType, masterInstanceType, onpremInstanceTypeDefault) + masterDeviceInfo, err = buildMasterDeviceInfo( + providerType, + masterInstanceType, + onpremInstanceTypeDefault) if err != nil { return nil, err } @@ -315,7 +341,11 @@ func buildClusters( logrus.Info("Using preferred regions: ", preferredRegions, "\n") - deviceInfo, err := buildDeviceInfo(cmd, providerType, noOfClusters, instanceTypes, onpremInstanceTypeDefault) + deviceInfo, err := buildDeviceInfo( + providerType, + noOfClusters, + instanceTypes, + onpremInstanceTypeDefault) if err != nil { return nil, err } @@ -517,9 +547,8 @@ func buildClusters( Provider: util.GetStringPointer(providerUUID), DedicatedNodes: util.GetBoolPointer(dedicatedNodes), - InstanceType: util.GetStringPointer(instanceTypes[i]), - ImageBundleUUID: util.GetStringPointer(imageBundleUUIDs[i]), - DeviceInfo: deviceInfo[i], + InstanceType: util.GetStringPointer(instanceTypes[i]), + DeviceInfo: deviceInfo[i], MasterInstanceType: 
util.GetStringPointer(masterInstanceType), MasterDeviceInfo: masterDeviceInfo, @@ -562,19 +591,15 @@ func buildClusters( k8sMasterCPUCoreCountLen := len(k8sMasterCPUCoreCount) if i == k8sTserverMemSizeLen { k8sTserverMemSize = append(k8sTserverMemSize, 4) - k8sTserverMemSizeLen = k8sTserverMemSizeLen + 1 } if i == k8sMasterMemSizeLen { k8sMasterMemSize = append(k8sMasterMemSize, 4) - k8sMasterMemSizeLen = k8sMasterMemSizeLen + 1 } if i == k8sTserverCPUCoreCountLen { k8sTserverCPUCoreCount = append(k8sTserverCPUCoreCount, 2) - k8sTserverCPUCoreCountLen = k8sTserverCPUCoreCountLen + 1 } if i == k8sMasterCPUCoreCountLen { k8sMasterCPUCoreCount = append(k8sTserverCPUCoreCount, 2) - k8sMasterCPUCoreCountLen = k8sMasterCPUCoreCountLen + 1 } userIntent := c.GetUserIntent() userIntent.SetTserverK8SNodeResourceSpec(ybaclient.K8SNodeResourceSpec{ @@ -587,6 +612,11 @@ func buildClusters( }) c.SetUserIntent(userIntent) } + if util.IsCloudBasedProvider(providerType) { + userIntent := c.GetUserIntent() + userIntent.SetImageBundleUUID(imageBundleUUIDs[i]) + c.SetUserIntent(userIntent) + } res = append(res, c) } @@ -594,7 +624,6 @@ func buildClusters( } func buildDeviceInfo( - cmd *cobra.Command, providerType string, noOfClusters int, instanceTypes []string, @@ -711,7 +740,7 @@ func buildDeviceInfo( volumeSizeLen = volumeSizeLen + 1 } if i == storageTypeLen { - storageTypeDefault := setDefaultStorageTypes(providerType, onpremVolumeDefault) + storageTypeDefault := setDefaultStorageTypes(providerType) if providerType == util.AWSProviderType && util.AwsInstanceTypesWithEphemeralStorageOnly(instanceTypes[i]) { storageTypeDefault = "" } @@ -733,7 +762,6 @@ func buildDeviceInfo( } func buildMasterDeviceInfo( - cmd *cobra.Command, providerType string, instanceType string, onpremInstanceTypeDefault ybaclient.InstanceTypeResp) ( @@ -772,7 +800,7 @@ func buildMasterDeviceInfo( } if len(storageType) == 0 { - storageType = setDefaultStorageTypes(providerType, onpremVolumeDefault) + 
storageType = setDefaultStorageTypes(providerType) if providerType == util.AWSProviderType && util.AwsInstanceTypesWithEphemeralStorageOnly(instanceType) { storageType = "" } @@ -823,7 +851,6 @@ func setDefaultInstanceTypes( func setDefaultStorageTypes( providerType string, - onpremVolumeDefault ybaclient.VolumeDetails, ) ( storageType string, ) { diff --git a/managed/yba-cli/cmd/universe/create_universe.go b/managed/yba-cli/cmd/universe/create_universe.go index abeffd5a5d4a..7fded011b542 100644 --- a/managed/yba-cli/cmd/universe/create_universe.go +++ b/managed/yba-cli/cmd/universe/create_universe.go @@ -84,7 +84,7 @@ var createUniverseCmd = &cobra.Command{ } enableYbc := true - communicationPorts := buildCommunicationPorts(cmd) + communicationPorts := buildCommunicationPorts() certUUID := "" clientRootCA := v1.GetString("root-ca") @@ -149,7 +149,7 @@ var createUniverseCmd = &cobra.Command{ cpuArch = util.X86_64 } - clusters, err := buildClusters(cmd, authAPI, universeName) + clusters, err := buildClusters(authAPI, universeName) if err != nil { logrus.Fatalf(formatter.Colorize(err.Error()+"\n", formatter.RedColor)) } diff --git a/managed/yba-cli/cmd/universe/list_universe.go b/managed/yba-cli/cmd/universe/list_universe.go index b6c57d30c859..5d96e723035d 100644 --- a/managed/yba-cli/cmd/universe/list_universe.go +++ b/managed/yba-cli/cmd/universe/list_universe.go @@ -43,9 +43,9 @@ var listUniverseCmd = &cobra.Command{ } if len(r) < 1 { if util.IsOutputType(formatter.TableFormatKey) { - logrus.Infoln("No universes found\n") + logrus.Info("No universes found\n") } else { - logrus.Infoln("[]\n") + logrus.Info("[]\n") } return } diff --git a/managed/yba-cli/cmd/util/constants.go b/managed/yba-cli/cmd/util/constants.go index 8336b103e856..8b32b2389d38 100644 --- a/managed/yba-cli/cmd/util/constants.go +++ b/managed/yba-cli/cmd/util/constants.go @@ -394,3 +394,9 @@ func AwsInstanceTypesWithEphemeralStorageOnly(instanceType string) bool { } return false } + +// 
IsCloudBasedProvider returns true if the provider is AWS, Azure or GCP +func IsCloudBasedProvider(providerType string) bool { + return providerType == AWSProviderType || + providerType == AzureProviderType || providerType == GCPProviderType +} diff --git a/managed/yba-cli/cmd/util/util.go b/managed/yba-cli/cmd/util/util.go index 0c47e6ab57f0..ff93cd0602ba 100644 --- a/managed/yba-cli/cmd/util/util.go +++ b/managed/yba-cli/cmd/util/util.go @@ -133,6 +133,24 @@ func CreateSingletonList(in interface{}) []interface{} { return []interface{}{in} } +// GetFloat64SliceFromString returns a slice of float64 from a string +func GetFloat64SliceFromString(in string) ([]float64, error) { + if in == "" { + return nil, nil + } + in = strings.Trim(in, "[ ]") + s := strings.Split(in, ",") + var out []float64 + for _, v := range s { + f, err := strconv.ParseFloat(v, 64) + if err != nil { + return nil, err + } + out = append(out, f) + } + return out, nil +} + // YbaStructuredError is a structure mimicking YBPError, with error being an interface{} // to accomodate errors thrown as YBPStructuredError type YbaStructuredError struct { From f97d7d5786c47b03bab07179fdfbccf051fb96e8 Mon Sep 17 00:00:00 2001 From: Rahul Barigidad Date: Mon, 16 Sep 2024 14:01:14 +0000 Subject: [PATCH 34/75] [#23770] [#23797] [#23837] YSQL: Fix most non-regress tests when run with Connection Manager Summary: Some additional tests can be fixed when running with Connection Manager, using different methods. The list of the tests being fixed, along with method/reasoning of fix are stated below: [DISABLED TESTS] TestAlterTableWithConcurrentTxn.testTransactionConflictErrorCode - Disabled due to flakiness that needs further understanding. TestPgAuthorization.testRevokeLoginMidSession - Disabled until better understanding of the problem. TestPgAuthorization.testConnectionLimitDecreasedMidSession - Disabled until better understanding of the problem. 
PgYbHashCodeScanProjection.testScans - For this specific test, we would need to still remove connection manager debug + query logs in order to reduce flakiness, disabling test until we can make log levels configurable via common server flags. PgYbStat.testYbTerminatedQueriesMultipleCauses - Test is flaky with modifications via round robin killing of backends as the assertion check may pass or fail when checking if connection is still alive after having killed backend processes. Disabling test for now. [TESTS WITHOUT WARMUP] TestPgExplainAnalyze.testSeqScan - While running in any warmup mode, catalog read requests decrease, but not to the expected value of 0. Allow test to run in single connection mode until further analysis. TestPgExplainAnalyze.testInsertValues - While running in any warmup mode, catalog read requests decrease, but not to the expected value of 0. Allow test to run in single connection mode until further analysis. TestToastFunction.testSimple - Memory allocation is significantly higher with a pool of warmed up connections, allow the test to run with a single connection to allow metrics assertions to pass. [TESTS WITH ROUND ROBIN ALLOCATION OF WARMED UP CONNECTIONS] TestPgCostModelSeekNextEstimation.testSeekNextEstimationIndexScan - Modified to have round robin behaviour in an earlier diff, allow setUp to happen after having restarted the cluster in round robin mode. TestPgCostModelSeekNextEstimation.testSeekNextEstimationBitmapScan - Modified to have round robin behaviour in an earlier diff, allow setUp to happen after having restarted the cluster in round robin mode. 
Jira: DB-12674, DB-12699, DB-12741 Test Plan: Jenkins: enable connection manager, all tests Reviewers: skumar, mkumar Reviewed By: skumar Subscribers: yql Differential Revision: https://phorge.dev.yugabyte.com/D37826 --- .../test/java/org/yb/pgsql/BasePgSQLTest.java | 10 ++++++++ .../TestAlterTableWithConcurrentTxn.java | 4 ++++ .../org/yb/pgsql/TestPgAuthorization.java | 6 +++++ .../TestPgCostModelSeekNextEstimation.java | 11 +++++++++ .../org/yb/pgsql/TestPgExplainAnalyze.java | 18 ++++++++++++-- .../pgsql/TestPgYbHashCodeScanProjection.java | 12 ++++++++++ .../test/java/org/yb/pgsql/TestPgYbStat.java | 24 ++++++------------- .../java/org/yb/pgsql/TestToastFunction.java | 10 +++++++- src/odyssey/sources/server_pool.h | 4 +++- 9 files changed, 78 insertions(+), 21 deletions(-) diff --git a/java/yb-pgsql/src/test/java/org/yb/pgsql/BasePgSQLTest.java b/java/yb-pgsql/src/test/java/org/yb/pgsql/BasePgSQLTest.java index 22622026fa72..751483152d5f 100644 --- a/java/yb-pgsql/src/test/java/org/yb/pgsql/BasePgSQLTest.java +++ b/java/yb-pgsql/src/test/java/org/yb/pgsql/BasePgSQLTest.java @@ -178,6 +178,16 @@ public class BasePgSQLTest extends BaseMiniClusterTest { "variables at the beginning of transaction boundaries, causing erroneous results in " + "the test, leading to failure."; + protected static final String INCORRECT_CONN_STATE_BEHAVIOR = + "Skipping this test with Connection Manager enabled. The connections may not be in the " + + "expected state due to the way physical connections are attached and detached from " + + "logical connections, where certain setting changes should only exist in new connections."; + + protected static final String CONFIGURABLE_DEBUG_LOGS_NEEDED = + "(DB-12742) Skipping this test with Connection Manager enabled. The test requires the " + + "ability to configure debug logs for connection manager to be at the same levels as " + + "tserver log levels."; + // Warmup modes for Connection Manager during test runs. 
protected static enum ConnectionManagerWarmupMode { NONE, diff --git a/java/yb-pgsql/src/test/java/org/yb/pgsql/TestAlterTableWithConcurrentTxn.java b/java/yb-pgsql/src/test/java/org/yb/pgsql/TestAlterTableWithConcurrentTxn.java index cbdf9c8ea0f1..488464c467ba 100644 --- a/java/yb-pgsql/src/test/java/org/yb/pgsql/TestAlterTableWithConcurrentTxn.java +++ b/java/yb-pgsql/src/test/java/org/yb/pgsql/TestAlterTableWithConcurrentTxn.java @@ -863,6 +863,10 @@ public void testDmlTransactionWithAlterOnDifferentResource() throws Exception { @Test public void testTransactionConflictErrorCode() throws Exception { + // (DB-12741) Disabling the test due to a flaky failure point when run with + // connection manager, needs further investigation. + assumeFalse(BasePgSQLTest.INCORRECT_CONN_STATE_BEHAVIOR, isTestRunningWithConnectionManager()); + try (Connection conn1 = getConnectionBuilder().connect(); Statement stmt1 = conn1.createStatement(); Connection conn2 = getConnectionBuilder().connect(); diff --git a/java/yb-pgsql/src/test/java/org/yb/pgsql/TestPgAuthorization.java b/java/yb-pgsql/src/test/java/org/yb/pgsql/TestPgAuthorization.java index d1934b805244..dc78ec6621d8 100644 --- a/java/yb-pgsql/src/test/java/org/yb/pgsql/TestPgAuthorization.java +++ b/java/yb-pgsql/src/test/java/org/yb/pgsql/TestPgAuthorization.java @@ -2707,6 +2707,9 @@ public void testCreateSchemaAuthorization() throws Exception { @Test public void testRevokeLoginMidSession() throws Exception { + // (DB-12741) Skip this test if running with connection manager. 
+ assumeFalse(BasePgSQLTest.INCORRECT_CONN_STATE_BEHAVIOR, isTestRunningWithConnectionManager()); + try (Connection connection1 = getConnectionBuilder().withTServer(0).connect(); Statement statement1 = connection1.createStatement()) { @@ -2847,6 +2850,9 @@ public void testRevokeAttributesMidSession() throws Exception { @Test public void testConnectionLimitDecreasedMidSession() throws Exception { + // (DB-12741) Skip this test if running with connection manager. + assumeFalse(BasePgSQLTest.INCORRECT_CONN_STATE_BEHAVIOR, isTestRunningWithConnectionManager()); + try (Connection connection1 = getConnectionBuilder().withTServer(0).connect(); Statement statement1 = connection1.createStatement()) { diff --git a/java/yb-pgsql/src/test/java/org/yb/pgsql/TestPgCostModelSeekNextEstimation.java b/java/yb-pgsql/src/test/java/org/yb/pgsql/TestPgCostModelSeekNextEstimation.java index 122ede2690b5..606fc7c9fabd 100644 --- a/java/yb-pgsql/src/test/java/org/yb/pgsql/TestPgCostModelSeekNextEstimation.java +++ b/java/yb-pgsql/src/test/java/org/yb/pgsql/TestPgCostModelSeekNextEstimation.java @@ -306,8 +306,13 @@ protected Map getTServerFlags() { // in Nov/2023. @Test public void testSeekNextEstimationIndexScan() throws Exception { + // (DB-12674) Allow tests to run in round-robin allocation mode when + // using a pool of warmed up connections to allow for deterministic results. setConnMgrWarmupModeAndRestartCluster(ConnectionManagerWarmupMode.ROUND_ROBIN); boolean isConnMgr = isTestRunningWithConnectionManager(); + if (isConnMgr) { + setUp(); + } try (Statement stmt = this.connection2.createStatement()) { // Warmup the cache when Connection Manager is enabled. // Additionally warmup all backends in round-robin mode. 
@@ -432,8 +437,14 @@ public void testSeekNextEstimationIndexScan() throws Exception { @Test public void testSeekNextEstimationBitmapScan() throws Exception { assumeTrue("BitmapScan has much fewer nexts in fastdebug (#22052)", TestUtils.isReleaseBuild()); + + // (DB-12674) Allow tests to run in round-robin allocation mode when + // using a pool of warmed up connections to allow for deterministic results. setConnMgrWarmupModeAndRestartCluster(ConnectionManagerWarmupMode.ROUND_ROBIN); boolean isConnMgr = isTestRunningWithConnectionManager(); + if (isConnMgr) { + setUp(); + } try (Statement stmt = this.connection2.createStatement()) { stmt.execute("SET work_mem TO '1GB'"); /* avoid getting close to work_mem */ // Warmup the cache when Connection Manager is enabled. diff --git a/java/yb-pgsql/src/test/java/org/yb/pgsql/TestPgExplainAnalyze.java b/java/yb-pgsql/src/test/java/org/yb/pgsql/TestPgExplainAnalyze.java index d265bd6de331..660277768213 100644 --- a/java/yb-pgsql/src/test/java/org/yb/pgsql/TestPgExplainAnalyze.java +++ b/java/yb-pgsql/src/test/java/org/yb/pgsql/TestPgExplainAnalyze.java @@ -96,7 +96,14 @@ public void testExplainNoTiming(String query, Checker checker) throws Exception @Test public void testSeqScan() throws Exception { - setConnMgrWarmupModeAndRestartCluster(ConnectionManagerWarmupMode.ROUND_ROBIN); + // (DB-12699) Catalog read requests decrease in any warmup mode when + // connection manager is enabled, but not to the expected value of 0. + // Allow the test to run without warmed up connections for now. 
+ setConnMgrWarmupModeAndRestartCluster(ConnectionManagerWarmupMode.NONE); + if (isTestRunningWithConnectionManager()) { + setUp(); + } + try (Statement stmt = connection.createStatement()) { Checker checker = makeTopLevelBuilder() .storageReadRequests(Checkers.equal(5)) @@ -342,7 +349,14 @@ public void testEmptyNestedLoop() throws Exception { @Test public void testInsertValues() throws Exception { - setConnMgrWarmupModeAndRestartCluster(ConnectionManagerWarmupMode.ROUND_ROBIN); + // (DB-12699) Catalog read requests decrease in any warmup mode when + // connection manager is enabled, but not to the expected value of 0. + // Allow the test to run without warmed up connections for now. + setConnMgrWarmupModeAndRestartCluster(ConnectionManagerWarmupMode.NONE); + if (isTestRunningWithConnectionManager()) { + setUp(); + } + try (Statement stmt = connection.createStatement()) { // reduce the batch size to avoid 0 wait time stmt.execute("SET ysql_session_max_batch_size = 4"); diff --git a/java/yb-pgsql/src/test/java/org/yb/pgsql/TestPgYbHashCodeScanProjection.java b/java/yb-pgsql/src/test/java/org/yb/pgsql/TestPgYbHashCodeScanProjection.java index 7f5ff0a943cf..8dc2b156ad4d 100644 --- a/java/yb-pgsql/src/test/java/org/yb/pgsql/TestPgYbHashCodeScanProjection.java +++ b/java/yb-pgsql/src/test/java/org/yb/pgsql/TestPgYbHashCodeScanProjection.java @@ -16,6 +16,7 @@ import static org.yb.AssertionWrappers.assertEquals; import static org.yb.AssertionWrappers.assertFalse; import static org.yb.AssertionWrappers.assertTrue; +import static org.junit.Assume.*; import java.io.ByteArrayOutputStream; import java.io.IOException; @@ -200,7 +201,18 @@ private void testOneCase( @Test public void testScans() throws Exception { + // (DB-12741) This test works with connection manager provided that its + // debug and query logs are disabled, which allows the logInterceptor to + // capture the needed information for this test to pass. 
Skip the test for + // the time being when connection manager is enabled. + assumeFalse(BasePgSQLTest.CONFIGURABLE_DEBUG_LOGS_NEEDED, isTestRunningWithConnectionManager()); + setConnMgrWarmupModeAndRestartCluster(ConnectionManagerWarmupMode.ROUND_ROBIN); + // Set up the necessary aspects of the test after restarting the cluster. + if (isConnMgrWarmupRoundRobinMode()) { + logInterceptor.detach(); + setUp(); + } try (Statement stmt = connection.createStatement()) { // Note: In case of using yb_hash_code function all its argument columns are fetched. // They are required for row recheck. diff --git a/java/yb-pgsql/src/test/java/org/yb/pgsql/TestPgYbStat.java b/java/yb-pgsql/src/test/java/org/yb/pgsql/TestPgYbStat.java index 539cfba790db..6503e120eeba 100644 --- a/java/yb-pgsql/src/test/java/org/yb/pgsql/TestPgYbStat.java +++ b/java/yb-pgsql/src/test/java/org/yb/pgsql/TestPgYbStat.java @@ -14,6 +14,7 @@ package org.yb.pgsql; import static org.yb.AssertionWrappers.*; +import static org.junit.Assume.*; import java.sql.Connection; import java.sql.ResultSet; @@ -62,15 +63,7 @@ private void executeQueryAndExpectTempFileLimitExceeded(final String query, private void executeQueryAndSendSignal(final String query, final Connection inputConnection, final String signal) throws Exception { try (Statement statement = inputConnection.createStatement()) { - - int[] pids = new int[CONN_MGR_WARMUP_BACKEND_COUNT]; - if (isConnMgrWarmupRoundRobinMode()) { - for (int i = 0; i < CONN_MGR_WARMUP_BACKEND_COUNT; i++){ - pids[i] = getPid(inputConnection); - } - } else { - pids[0] = getPid(inputConnection); - } + final int pid = getPid(inputConnection); final CountDownLatch startSignal = new CountDownLatch(1); final List cmds = new ArrayList<>(); @@ -91,13 +84,7 @@ private void executeQueryAndSendSignal(final String query, startSignal.countDown(); startSignal.await(); Thread.sleep(100); // Allow the query execution a headstart before killing - if (isConnMgrWarmupRoundRobinMode()) { - for (int i 
= 0; i < CONN_MGR_WARMUP_BACKEND_COUNT; i++) { - ProcessUtil.signalProcess(pids[i], signal); - } - } else { - ProcessUtil.signalProcess(pids[0], signal); - } + ProcessUtil.signalProcess(pid, signal); }); MiscUtil.runInParallel(cmds, startSignal, 60); } catch (Throwable exception) { @@ -160,7 +147,10 @@ private boolean waitUntilConditionSatisfiedOrTimeout(String query, @Test public void testYbTerminatedQueriesMultipleCauses() throws Exception { - setConnMgrWarmupModeAndRestartCluster(ConnectionManagerWarmupMode.ROUND_ROBIN); + // (DB-12741) Test is flaky with connection manager irrespective of warmup + // mode. Disable the test for now when running with connection manager. + assumeFalse(BasePgSQLTest.INCORRECT_CONN_STATE_BEHAVIOR, isTestRunningWithConnectionManager()); + // We need to restart the cluster to wipe the state currently contained in yb_terminated_queries // that can potentially be leftover from another test in this class. This would let us start // with a clean slate. diff --git a/java/yb-pgsql/src/test/java/org/yb/pgsql/TestToastFunction.java b/java/yb-pgsql/src/test/java/org/yb/pgsql/TestToastFunction.java index 2ebabda8579a..cc266fdf4100 100644 --- a/java/yb-pgsql/src/test/java/org/yb/pgsql/TestToastFunction.java +++ b/java/yb-pgsql/src/test/java/org/yb/pgsql/TestToastFunction.java @@ -29,7 +29,15 @@ public class TestToastFunction extends TestToastFunctionBase { private static final Logger LOG = LoggerFactory.getLogger(TestToastFunction.class); @Test - public void testSimple() throws SQLException { + public void testSimple() throws Exception { + // (DB-12699) In any warmup mode, the memory usage is much higher than in + // the case of no warmup mode. Allow the test to run exclusively in no + // warmup mode for now. 
+ setConnMgrWarmupModeAndRestartCluster(ConnectionManagerWarmupMode.NONE); + if (isTestRunningWithConnectionManager()) { + createTables(); + } + setEnableToastFlag(true); CacheMemoryContextTracker cxt = simpleTest(); cxt.assertMemoryUsageLessThan(300 * KB); diff --git a/src/odyssey/sources/server_pool.h b/src/odyssey/sources/server_pool.h index b795c0ed3fc3..f9dcde456872 100644 --- a/src/odyssey/sources/server_pool.h +++ b/src/odyssey/sources/server_pool.h @@ -133,10 +133,12 @@ static inline od_server_t *yb_od_server_pool_idle_last(od_server_pool_t *pool) { od_list_t *target = &pool->idle; od_server_t *server = NULL; + od_list_t *i, *n; int len = pool->count_idle; if (len == 0) return NULL; - server = od_container_of(target->prev, od_server_t, link); + od_list_foreach(target, i) + server = od_container_of(i, od_server_t, link); return server; } From ee2b10815e872513b05f9ab96a61e49293e4a50a Mon Sep 17 00:00:00 2001 From: Aishwarya Chakravarthy Date: Mon, 16 Sep 2024 11:25:40 -0400 Subject: [PATCH 35/75] [docs] reverted PR 23909 changes (#23941) --- .../ysql-language-features/postgresql-compatibility.md | 4 ++-- docs/content/preview/reference/configuration/yugabyted.md | 2 +- docs/content/preview/releases/ybdb-releases/v2.21.md | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/content/preview/explore/ysql-language-features/postgresql-compatibility.md b/docs/content/preview/explore/ysql-language-features/postgresql-compatibility.md index fa6259f1b235..2ff23247cd22 100644 --- a/docs/content/preview/explore/ysql-language-features/postgresql-compatibility.md +++ b/docs/content/preview/explore/ysql-language-features/postgresql-compatibility.md @@ -135,12 +135,12 @@ Enables the use of PostgreSQL [parallel queries](https://www.postgresql.org/docs To enable EPCM in YugabyteDB: -- Pass the `enable_pg_parity_early_access` flag to [yugabyted](../../../reference/configuration/yugabyted/) when starting your cluster. 
+- Pass the `enable_pg_parity_tech_preview` flag to [yugabyted](../../../reference/configuration/yugabyted/) when starting your cluster. For example, from your YugabyteDB home directory, run the following command: ```sh -./bin/yugabyted start --enable_pg_parity_early_access +./bin/yugabyted start --enable_pg_parity_tech_preview ``` Note: When enabling the cost models, ensure that packed row for colocated tables is enabled by setting the `--ysql_enable_packed_row_for_colocated_table` flag to true. diff --git a/docs/content/preview/reference/configuration/yugabyted.md b/docs/content/preview/reference/configuration/yugabyted.md index a5312d329d14..1c0799419ad5 100644 --- a/docs/content/preview/reference/configuration/yugabyted.md +++ b/docs/content/preview/reference/configuration/yugabyted.md @@ -751,7 +751,7 @@ For on-premises deployments, consider racks as zones to treat them as fault doma : Enable or disable the backup daemon with yugabyted start. Default: `false` : If you start a cluster using the `--backup_daemon` flag, you also need to download and extract the [YB Controller release](https://downloads.yugabyte.com/ybc/2.1.0.0-b9/ybc-2.1.0.0-b9-linux-x86_64.tar.gz) to the yugabyte-{{< yb-version version="preview" >}} release directory. ---enable_pg_parity_early_access *PostgreSQL-compatibilty* +--enable_pg_parity_tech_preview *PostgreSQL-compatibilty* : Enable Enhanced PostgreSQL Compatibility Mode. Default: `false` #### Advanced flags diff --git a/docs/content/preview/releases/ybdb-releases/v2.21.md b/docs/content/preview/releases/ybdb-releases/v2.21.md index 2229b8fff659..eda5440b5607 100644 --- a/docs/content/preview/releases/ybdb-releases/v2.21.md +++ b/docs/content/preview/releases/ybdb-releases/v2.21.md @@ -350,12 +350,12 @@ Converted the `ysql_skip_row_lock_for_update` to an auto-flag to resolve compati We're pleased to announce the tech preview of the new Enhanced Postgres Compatibility Mode in the 2.21.0.0 release. 
This mode enables you to take advantage of many new improvements in both PostgreSQL compatibility and performance parity, making it even easier to lift and shift your applications from PostgreSQL to YugabyteDB. When this mode is turned on, YugabyteDB uses the [Read-Committed](../../../architecture/transactions/read-committed/) isolation mode, the [Wait-on-Conflict](../../../architecture/transactions/concurrency-control/#wait-on-conflict) concurrency mode for predictable P99 latencies, and the new Cost Based Optimizer that takes advantage of the distributed storage layer architecture and includes query pushdowns, LSM indexes, and [batched nested loop joins](../../../explore/ysql-language-features/join-strategies/#batched-nested-loop-join-bnl) to offer PostgreSQL-like performance. -You can enable the compatibility mode by passing the `enable_pg_parity_early_access` flag to [yugabyted](../../../reference/configuration/yugabyted/#yugabyted), when bringing up your cluster. +You can enable the compatibility mode by passing the `enable_pg_parity_tech_preview` flag to [yugabyted](../../../reference/configuration/yugabyted/#yugabyted), when bringing up your cluster. For example, from your YugabyteDB home directory, run the following command: ```sh -./bin/yugabyted start --enable_pg_parity_early_access +./bin/yugabyted start --enable_pg_parity_tech_preview ``` Note: When enabling the cost models, ensure that packed row for colocated tables is enabled by setting the `--ysql_enable_packed_row_for_colocated_table` flag to true. 
From bd80f4e53d727171c83d6a8d0c23922e13e68472 Mon Sep 17 00:00:00 2001 From: Basava Date: Mon, 16 Sep 2024 11:00:25 -0500 Subject: [PATCH 36/75] [#23924] DocDB: Address recent flakiness of PgGetLockStatusTest.TestGetWaitStart Summary: After enabling shared memory in release mode, it seems like the test `PgGetLockStatusTest.TestGetWaitStart` is failing in mac release builds because it attempts to query pg_locks even before the waiter gets inserted into the wait-queue. This revision addresses the non-determinism by waiting using sync points instead of sleep. Jira: DB-12826 Test Plan: ./yb_build.sh release --cxx-test='TEST_F(PgGetLockStatusTest, TestGetWaitStart) {' -n 20 --tp 1 Reviewers: rthallam, pjain, patnaik.balivada Reviewed By: patnaik.balivada Subscribers: ybase, yql Differential Revision: https://phorge.dev.yugabyte.com/D38048 --- src/yb/docdb/wait_queue.cc | 2 +- src/yb/yql/pgwrapper/pg_get_lock_status-test.cc | 8 ++++++-- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/src/yb/docdb/wait_queue.cc b/src/yb/docdb/wait_queue.cc index 85b27ad18d03..4f2b626719e1 100644 --- a/src/yb/docdb/wait_queue.cc +++ b/src/yb/docdb/wait_queue.cc @@ -1385,7 +1385,7 @@ class WaitQueue::Impl { for (auto [blocker, _] : waiter_data->blockers) { blocker->AddWaiter(waiter_data); } - DEBUG_ONLY_TEST_SYNC_POINT("WaitQueue::Impl::SetupWaiterUnlocked:1"); + TEST_SYNC_POINT("WaitQueue::Impl::SetupWaiterUnlocked:1"); return Status::OK(); } diff --git a/src/yb/yql/pgwrapper/pg_get_lock_status-test.cc b/src/yb/yql/pgwrapper/pg_get_lock_status-test.cc index 14c04afd5482..fbec368d9a09 100644 --- a/src/yb/yql/pgwrapper/pg_get_lock_status-test.cc +++ b/src/yb/yql/pgwrapper/pg_get_lock_status-test.cc @@ -483,6 +483,11 @@ TEST_F(PgGetLockStatusTest, TestGetWaitStart) { ASSERT_OK(blocker.StartTransaction(IsolationLevel::READ_COMMITTED)); ASSERT_OK(blocker.FetchFormat("SELECT * FROM $0 WHERE k=$1 FOR UPDATE", table, locked_key)); + yb::SyncPoint::GetInstance()->LoadDependency({ + 
{"WaitQueue::Impl::SetupWaiterUnlocked:1", "PgGetLockStatusTest::TestGetWaitStart"}}); + yb::SyncPoint::GetInstance()->ClearTrace(); + yb::SyncPoint::GetInstance()->EnableProcessing(); + std::atomic txn_finished = false; std::thread th([&session, &table, &locked_key, &txn_finished] { ASSERT_OK(session.conn->FetchFormat( @@ -490,8 +495,7 @@ TEST_F(PgGetLockStatusTest, TestGetWaitStart) { txn_finished.store(true); }); - SleepFor(1ms * kTimeMultiplier); - + TEST_SYNC_POINT("PgGetLockStatusTest::TestGetWaitStart"); auto res = ASSERT_RESULT(blocker.FetchRow( "SELECT COUNT(*) FROM yb_lock_status(null, null) WHERE waitstart IS NOT NULL")); // The statement above acquires two locks -- From f0a5db706e85412ec85a83f8286c892094d83688 Mon Sep 17 00:00:00 2001 From: Karthik Ramanathan Date: Mon, 16 Sep 2024 00:13:14 -0700 Subject: [PATCH 37/75] [#20908] YSQL: Introduce interface to optimize index non-key column updates Summary: **Background** Postgres updates tuples by deleting the existing copy of the tuple, and reinserting it with an updated value. This applies to both relations (tables) as well as indexes. Thus, Postgres' generalized access method interface includes definitions for inserts and deletes but not updates. DocDB supports the notion of in-place updates (PGSQL_UPDATE) when the key columns of the tuple remain unmodified. This mechanism is leveraged while updating tuples of main tables when the primary key of the table remains unmodified. However, no such mechanism exists for updating index tuples. **Problem** Pggate has a buffering mechanism for write requests which allows multiple write requests to be flushed at once, thus reducing the number of round trips between Postgres and DocDB. To preserve causality/ordering, this buffering mechanism does not allow multiple writes to the same tuple (identified by the tuple's CTID) to be enqueued together in the buffer. The previous write to the tuple is flushed before enqueueing the next write. 
This poses a problem when updating the non-key columns of indexes. For instance, when the included columns in a covering index need to be updated, the update is modeled as DELETE + INSERT requests. Since there is no change to the key columns of the index, this causes the update to require two flushes. **Solution** This revision introduces an index update interface in postgres for facilitating in-place updates of index tuples in DocDB. This replaces the need to perform a DELETE + INSERT on an index update when no key columns are modified. This is useful in two scenarios: - Updating the INCLUDE columns in both unique and non-unique indexes - Updating the base table CTID in unique indexes Prior to updating the index tuple, the execution can check if the index access method supports in-place updates via a newly introduced indexAM field `ybamcanupdatetupleinplace`. Currently, in-place updates are only supported for LSM indexes. Note - The base table CTID is a part of the index key for non-unique indexes. **Example** Consider a table containing two covering indexes, one unique and one non-unique.
The example below displays ``` yugabyte=# \d demo Table "public.demo" Column | Type | Collation | Nullable | Default --------+---------+-----------+----------+--------- k | integer | | not null | v1 | integer | | | v2 | integer | | | v3 | integer | | | v4 | integer | | | Indexes: "demo_pkey" PRIMARY KEY, lsm (k HASH) "demo_v2_v4_idx" UNIQUE, lsm (v2 HASH) INCLUDE (v4) "demo_v1_v3_idx" lsm (v1 HASH) INCLUDE (v3) yugabyte=# SET yb_explain_hide_non_deterministic_fields TO true; -- Query 1: Update of included columns in non-unique index yugabyte=# EXPLAIN (ANALYZE, DIST) UPDATE demo SET v3 = v3 + 1 WHERE k = 1; QUERY PLAN ----------------------------------------------------------------------------------------------------- Update on demo (cost=0.00..4.12 rows=1 width=96) (actual rows=0 loops=1) -> Index Scan using demo_pkey on demo (cost=0.00..4.12 rows=1 width=96) (actual rows=1 loops=1) Index Cond: (k = 1) Storage Table Read Requests: 1 Storage Table Rows Scanned: 1 Storage Table Write Requests: 1 Storage Index Write Requests: 1 // would have been 2 previously Storage Read Requests: 1 Storage Rows Scanned: 1 Storage Write Requests: 2 Storage Flush Requests: 1 // would have been 2 previously (11 rows) -- Query 2: Update of primary key + included columns in unique index yugabyte=# EXPLAIN (ANALYZE, DIST) UPDATE demo SET k = k + 1, v4 = v3 + 1 WHERE k = 10; QUERY PLAN ----------------------------------------------------------------------------------------------------- Update on demo (cost=0.00..4.12 rows=1 width=96) (actual rows=0 loops=1) -> Index Scan using demo_pkey on demo (cost=0.00..4.12 rows=1 width=96) (actual rows=1 loops=1) Index Cond: (k = 10) Storage Table Read Requests: 1 Storage Table Rows Scanned: 1 Storage Table Write Requests: 2 Storage Index Write Requests: 3 // would have been 4 previously Storage Read Requests: 1 Storage Rows Scanned: 1 Storage Write Requests: 5 Storage Flush Requests: 1 // would have been 2 previously (11 rows) ``` Previously, both 
of the above cases would have required two flushes as the key columns of the respective indexes remain unchanged across the DELETE and INSERT requests. **Future Work** - The computation of whether key columns of an index are modified, can be performed at planning time. This can be done once D34040 lands. - Refactor ApplyUpdate in pgsql_operation.cc to delineate table update (main table, index) and sequence update operations. Jira: DB-9891 Test Plan: Run the following tests: ``` ./yb_build.sh --java-test 'org.yb.pgsql.TestPgRegressUpdateOptimized#schedule' ``` Reviewers: amartsinchyk, tnayak, jason Reviewed By: amartsinchyk, tnayak Subscribers: ybase, smishra, yql Tags: #jenkins-ready Differential Revision: https://phorge.dev.yugabyte.com/D36588 --- .../src/backend/access/index/indexam.c | 17 + .../src/backend/access/yb_access/yb_lsm.c | 103 + src/postgres/src/backend/access/ybgin/ybgin.c | 2 + .../src/backend/executor/execIndexing.c | 630 ++++-- .../src/backend/executor/nodeModifyTable.c | 11 +- .../src/backend/executor/ybcModifyTable.c | 26 + src/postgres/src/include/access/amapi.h | 12 + src/postgres/src/include/access/genam.h | 5 + src/postgres/src/include/executor/executor.h | 8 + .../src/include/executor/ybcModifyTable.h | 15 + src/postgres/src/include/utils/relcache.h | 4 + .../test/regress/expected/yb_lock_status.out | 10 +- .../regress/expected/yb_lock_status_1.out | 8 +- .../regress/expected/yb_lock_status_2.out | 8 +- .../yb_update_optimize_index_updates.out | 1712 +++++++++++++++++ .../expected/yb_update_optimize_indices.out | 36 +- .../sql/yb_update_optimize_index_updates.sql | 263 +++ .../test/regress/yb_update_optimized_schedule | 1 + .../pgvector/src/ybvector/ybvector.c | 2 + .../pgvector/src/ybvector/ybvector.h | 3 + .../pgvector/src/ybvector/ybvectorwrite.c | 7 + src/yb/docdb/pgsql_operation.cc | 13 +- 22 files changed, 2701 insertions(+), 195 deletions(-) create mode 100644 
src/postgres/src/test/regress/expected/yb_update_optimize_index_updates.out create mode 100644 src/postgres/src/test/regress/sql/yb_update_optimize_index_updates.sql diff --git a/src/postgres/src/backend/access/index/indexam.c b/src/postgres/src/backend/access/index/indexam.c index 9519375d1382..49448e054943 100644 --- a/src/postgres/src/backend/access/index/indexam.c +++ b/src/postgres/src/backend/access/index/indexam.c @@ -265,6 +265,23 @@ index_delete(Relation indexRelation, indexInfo); } +void +yb_index_update(Relation indexRelation, + Datum *values, + bool *isnull, + Datum oldYbctid, + Datum newYbctid, + Relation heapRelation, + struct IndexInfo *indexInfo) +{ + RELATION_CHECKS; + CHECK_REL_PROCEDURE(yb_amupdate); + + indexRelation->rd_amroutine->yb_amupdate(indexRelation, values, isnull, + oldYbctid, newYbctid, + heapRelation, indexInfo); +} + /* * index_beginscan - start a scan of an index with amgettuple * diff --git a/src/postgres/src/backend/access/yb_access/yb_lsm.c b/src/postgres/src/backend/access/yb_access/yb_lsm.c index d5ca063696c2..29e32ea3d6b3 100644 --- a/src/postgres/src/backend/access/yb_access/yb_lsm.c +++ b/src/postgres/src/backend/access/yb_access/yb_lsm.c @@ -133,6 +133,98 @@ doBindsForIdxWrite(YBCPgStatement stmt, } +static void +doAssignForIdxUpdate(YBCPgStatement stmt, + Relation index, + Datum *values, + bool *isnull, + int n_atts, + Datum old_ybbasectid, + Datum new_ybbasectid) +{ + TupleDesc tupdesc = RelationGetDescr(index); + int indnkeyatts = IndexRelationGetNumberOfKeyAttributes(index); + + if (old_ybbasectid == 0) + { + ereport(ERROR, + (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("Missing base table ybctid in index write request"))); + } + + bool has_null_attr = false; + for (AttrNumber attnum = 1; attnum <= n_atts; ++attnum) + { + Oid type_id = GetTypeId(attnum, tupdesc); + Oid collation_id = YBEncodingCollation(stmt, attnum, + ybc_get_attcollation(tupdesc, attnum)); + Datum value = values[attnum - 1]; + bool is_null = 
isnull[attnum - 1]; + + YBCPgExpr ybc_expr = YBCNewConstant(stmt, type_id, collation_id, value, is_null); + + /* + * Attrs that are a part of the index key are 'bound' to their values. + * It is guaranteed by YbExecUpdateIndexTuples that the values of these + * attrs are unmodified. The non-key attrs are 'assigned' to their + * new (updated) values. These represent the values that are undergoing + * the update. + */ + if (attnum <= indnkeyatts) + { + HandleYBStatus(YBCPgDmlBindColumn(stmt, attnum, ybc_expr)); + + /* + * In a unique index, if any of the key columns are NULL, we need + * to handle NULL != NULL semantics. + */ + has_null_attr = has_null_attr || is_null; + } + else + HandleYBStatus(YBCPgDmlAssignColumn(stmt, attnum, ybc_expr)); + } + + bool unique_index = index->rd_index->indisunique; + + /* + * For a non-unique index, the base table CTID attribute is a part of the + * index key. Therefore, updates to the primary key require the index + * tuple to be deleted and re-inserted, and will not utilize this function. + * For a unique index, the base table CTID attribute is not a part of the + * index key and can be updated in place. Handle that here. + */ + if (new_ybbasectid != (Datum) NULL) + { + Assert(unique_index); + + YBCPgExpr ybc_expr = YBCNewConstant(stmt, BYTEAOID, InvalidOid, new_ybbasectid, false); + HandleYBStatus(YBCPgDmlAssignColumn(stmt, YBIdxBaseTupleIdAttributeNumber, ybc_expr)); + } + + /* + * Bind to key columns that do not have an attnum in postgres: + * - For non-unique indexes, this is the base table CTID. + * - For unique indexes, this is the unique key suffix. 
+ */ + if (!unique_index) + YbBindDatumToColumn(stmt, + YBIdxBaseTupleIdAttributeNumber, + BYTEAOID, + InvalidOid, + old_ybbasectid, + false, + NULL /* null_type_entity */); + + else + YbBindDatumToColumn(stmt, + YBUniqueIdxKeySuffixAttributeNumber, + BYTEAOID, + InvalidOid, + old_ybbasectid, + !has_null_attr /* is_null */, + NULL /* null_type_entity */); +} + static void ybcinbuildCallback(Relation index, HeapTuple heapTuple, Datum *values, bool *isnull, bool tupleIsAlive, void *state) @@ -268,6 +360,15 @@ ybcindelete(Relation index, Datum *values, bool *isnull, Datum ybctid, Relation doBindsForIdxWrite, NULL /* indexstate */); } +static void +ybcinupdate(Relation index, Datum *values, bool *isnull, Datum oldYbctid, + Datum newYbctid, Relation heap, struct IndexInfo *indexInfo) +{ + Assert(!index->rd_index->indisprimary); + YBCExecuteUpdateIndex(index, values, isnull, oldYbctid, newYbctid, + doAssignForIdxUpdate); +} + static IndexBulkDeleteResult * ybcinbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats, IndexBulkDeleteCallback callback, void *callback_state) @@ -628,6 +729,7 @@ ybcinhandler(PG_FUNCTION_ARGS) amroutine->ampredlocks = true; amroutine->amcanparallel = true; amroutine->amcaninclude = true; + amroutine->ybamcanupdatetupleinplace = true; amroutine->amkeytype = InvalidOid; amroutine->ambuild = ybcinbuild; @@ -653,6 +755,7 @@ ybcinhandler(PG_FUNCTION_ARGS) amroutine->yb_amisforybrelation = true; amroutine->yb_aminsert = ybcininsert; amroutine->yb_amdelete = ybcindelete; + amroutine->yb_amupdate = ybcinupdate; amroutine->yb_ambackfill = ybcinbackfill; amroutine->yb_ammightrecheck = ybcinmightrecheck; amroutine->yb_amgetbitmap = ybcgetbitmap; diff --git a/src/postgres/src/backend/access/ybgin/ybgin.c b/src/postgres/src/backend/access/ybgin/ybgin.c index 83f5a6d72f5e..f281daa91701 100644 --- a/src/postgres/src/backend/access/ybgin/ybgin.c +++ b/src/postgres/src/backend/access/ybgin/ybgin.c @@ -55,6 +55,7 @@ ybginhandler(PG_FUNCTION_ARGS) 
amroutine->ampredlocks = true; /* TODO(jason): check what this is */ amroutine->amcanparallel = false; amroutine->amcaninclude = false; + amroutine->ybamcanupdatetupleinplace = false; amroutine->amkeytype = InvalidOid; amroutine->ambuild = ybginbuild; @@ -80,6 +81,7 @@ ybginhandler(PG_FUNCTION_ARGS) amroutine->yb_amisforybrelation = true; amroutine->yb_aminsert = ybgininsert; amroutine->yb_amdelete = ybgindelete; + amroutine->yb_amupdate = NULL; amroutine->yb_ambackfill = ybginbackfill; amroutine->yb_ammightrecheck = ybginmightrecheck; amroutine->yb_ambindschema = ybginbindschema; diff --git a/src/postgres/src/backend/executor/execIndexing.c b/src/postgres/src/backend/executor/execIndexing.c index 432eceac3bcc..dc7341c931a6 100644 --- a/src/postgres/src/backend/executor/execIndexing.c +++ b/src/postgres/src/backend/executor/execIndexing.c @@ -114,7 +114,10 @@ #include "storage/lmgr.h" #include "utils/tqual.h" +/* YB includes. */ +#include "catalog/pg_am_d.h" #include "executor/ybcModifyTable.h" +#include "utils/relcache.h" /* waitMode argument to check_exclusion_or_unique_constraint() */ typedef enum @@ -248,6 +251,147 @@ ExecCloseIndices(ResultRelInfo *resultRelInfo) */ } +/* ---------------------------------------------------------------- + * YbExecDoInsertIndexTuple + * + * This routine performs insertion of an index tuple of 'indexRelation' + * that is identified by a combination of the base table CTID ('tuple->t_ybctid') + * and data in the base table's tuple slot ('slot'). + * This routine has been refactored out of ExecInsertIndexTuples so that + * Yugabyte's index update routine can invoke this routine directly without + * needing to duplicate a bunch of checks in ExecInsertIndexTuples. + * This routine is invoked by both Yugabyte and non-YB relations. 
+ * ---------------------------------------------------------------- + */ +static bool +YbExecDoInsertIndexTuple(ResultRelInfo *resultRelInfo, + Relation indexRelation, + IndexInfo *indexInfo, + TupleTableSlot *slot, + HeapTuple tuple, + EState *estate, + bool noDupErr, + bool *specConflict, + List *arbiterIndexes) +{ + bool applyNoDupErr; + IndexUniqueCheck checkUnique; + bool satisfiesConstraint; + bool deferredCheck = false; + Datum values[INDEX_MAX_KEYS]; + bool isnull[INDEX_MAX_KEYS]; + + /* + * FormIndexDatum fills in its values and isnull parameters with the + * appropriate values for the column(s) of the index. + */ + FormIndexDatum(indexInfo, + slot, + estate, + values, + isnull); + + /* Check whether to apply noDupErr to this index */ + applyNoDupErr = noDupErr && + (arbiterIndexes == NIL || + list_member_oid(arbiterIndexes, + indexRelation->rd_index->indexrelid)); + + /* + * The index AM does the actual insertion, plus uniqueness checking. + * + * For an immediate-mode unique index, we just tell the index AM to + * throw error if not unique. + * + * For a deferrable unique index, we tell the index AM to just detect + * possible non-uniqueness, and we add the index OID to the result + * list if further checking is needed. + * + * For a speculative insertion (used by INSERT ... ON CONFLICT), do + * the same as for a deferrable unique index. 
+ */ + if (!indexRelation->rd_index->indisunique) + checkUnique = UNIQUE_CHECK_NO; + else if (applyNoDupErr) + checkUnique = UNIQUE_CHECK_PARTIAL; + else if (indexRelation->rd_index->indimmediate) + checkUnique = UNIQUE_CHECK_YES; + else + checkUnique = UNIQUE_CHECK_PARTIAL; + + satisfiesConstraint = + index_insert(indexRelation, /* index relation */ + values, /* array of index Datums */ + isnull, /* null flags */ + &(tuple->t_self), /* tid of heap tuple */ + tuple, /* heap tuple */ + resultRelInfo->ri_RelationDesc, /* heap relation */ + checkUnique, /* type of uniqueness check to do */ + indexInfo, /* index AM may need this */ + false); /* yb_shared_insert */ + + /* + * If the index has an associated exclusion constraint, check that. + * This is simpler than the process for uniqueness checks since we + * always insert first and then check. If the constraint is deferred, + * we check now anyway, but don't throw error on violation or wait for + * a conclusive outcome from a concurrent insertion; instead we'll + * queue a recheck event. Similarly, noDupErr callers (speculative + * inserters) will recheck later, and wait for a conclusive outcome + * then. + * + * An index for an exclusion constraint can't also be UNIQUE (not an + * essential property, we just don't allow it in the grammar), so no + * need to preserve the prior state of satisfiesConstraint. 
+ */ + if (indexInfo->ii_ExclusionOps != NULL) + { + bool violationOK; + CEOUC_WAIT_MODE waitMode; + + if (applyNoDupErr) + { + violationOK = true; + waitMode = CEOUC_LIVELOCK_PREVENTING_WAIT; + } + else if (!indexRelation->rd_index->indimmediate) + { + violationOK = true; + waitMode = CEOUC_NOWAIT; + } + else + { + violationOK = false; + waitMode = CEOUC_WAIT; + } + + satisfiesConstraint = + check_exclusion_or_unique_constraint(resultRelInfo->ri_RelationDesc, + indexRelation, indexInfo, + &(tuple->t_self), values, isnull, + estate, false, + waitMode, violationOK, NULL, + NULL /* ybConflictSlot */); + } + + if ((checkUnique == UNIQUE_CHECK_PARTIAL || + indexInfo->ii_ExclusionOps != NULL) && + !satisfiesConstraint) + { + /* + * The tuple potentially violates the uniqueness or exclusion + * constraint, so make a note of the index so that we can re-check + * it later. Speculative inserters are told if there was a + * speculative conflict, since that always requires a restart. + */ + deferredCheck = true; + if (indexRelation->rd_index->indimmediate && specConflict) + *specConflict = true; + } + + return deferredCheck; +} + /* ---------------------------------------------------------------- * ExecInsertIndexTuples * @@ -299,8 +443,6 @@ ExecInsertIndexTuplesOptimized(TupleTableSlot *slot, Relation heapRelation; IndexInfo **indexInfoArray; ExprContext *econtext; - Datum values[INDEX_MAX_KEYS]; - bool isnull[INDEX_MAX_KEYS]; bool isYBRelation; /* @@ -329,18 +471,6 @@ ExecInsertIndexTuplesOptimized(TupleTableSlot *slot, { Relation indexRelation = relationDescs[i]; IndexInfo *indexInfo; - bool applyNoDupErr; - IndexUniqueCheck checkUnique; - bool satisfiesConstraint; - - /* - * For an update command check if we need to skip index. For that purpose, - * we check if the relid of the index is part of the skip list. 
- */ - if (indexRelation == NULL || - list_member_oid(estate->yb_skip_entities.index_list, - RelationGetRelid(indexRelation))) - continue; indexInfo = indexInfoArray[i]; Assert(indexInfo->ii_ReadyForInserts == @@ -381,118 +511,53 @@ ExecInsertIndexTuplesOptimized(TupleTableSlot *slot, continue; } - /* - * FormIndexDatum fills in its values and isnull parameters with the - * appropriate values for the column(s) of the index. - */ - FormIndexDatum(indexInfo, - slot, - estate, - values, - isnull); - - /* Check whether to apply noDupErr to this index */ - applyNoDupErr = noDupErr && - (arbiterIndexes == NIL || - list_member_oid(arbiterIndexes, - indexRelation->rd_index->indexrelid)); - - /* - * The index AM does the actual insertion, plus uniqueness checking. - * - * For an immediate-mode unique index, we just tell the index AM to - * throw error if not unique. - * - * For a deferrable unique index, we tell the index AM to just detect - * possible non-uniqueness, and we add the index OID to the result - * list if further checking is needed. - * - * For a speculative insertion (used by INSERT ... ON CONFLICT), do - * the same as for a deferrable unique index. - */ - if (!indexRelation->rd_index->indisunique) - checkUnique = UNIQUE_CHECK_NO; - else if (applyNoDupErr) - checkUnique = UNIQUE_CHECK_PARTIAL; - else if (indexRelation->rd_index->indimmediate) - checkUnique = UNIQUE_CHECK_YES; - else - checkUnique = UNIQUE_CHECK_PARTIAL; - - satisfiesConstraint = - index_insert(indexRelation, /* index relation */ - values, /* array of index Datums */ - isnull, /* null flags */ - &(tuple->t_self), /* tid of heap tuple */ - tuple, /* heap tuple */ - heapRelation, /* heap relation */ - checkUnique, /* type of uniqueness check to do */ - indexInfo, /* index AM may need this */ - false); /* yb_shared_insert */ - - /* - * If the index has an associated exclusion constraint, check that. 
- * This is simpler than the process for uniqueness checks since we - * always insert first and then check. If the constraint is deferred, - * we check now anyway, but don't throw error on violation or wait for - * a conclusive outcome from a concurrent insertion; instead we'll - * queue a recheck event. Similarly, noDupErr callers (speculative - * inserters) will recheck later, and wait for a conclusive outcome - * then. - * - * An index for an exclusion constraint can't also be UNIQUE (not an - * essential property, we just don't allow it in the grammar), so no - * need to preserve the prior state of satisfiesConstraint. - */ - if (indexInfo->ii_ExclusionOps != NULL) - { - bool violationOK; - CEOUC_WAIT_MODE waitMode; - - if (applyNoDupErr) - { - violationOK = true; - waitMode = CEOUC_LIVELOCK_PREVENTING_WAIT; - } - else if (!indexRelation->rd_index->indimmediate) - { - violationOK = true; - waitMode = CEOUC_NOWAIT; - } - else - { - violationOK = false; - waitMode = CEOUC_WAIT; - } - - satisfiesConstraint = - check_exclusion_or_unique_constraint(heapRelation, - indexRelation, indexInfo, - &(tuple->t_self), values, isnull, - estate, false, - waitMode, violationOK, NULL, - NULL /* ybConflictSlot */); - } - - if ((checkUnique == UNIQUE_CHECK_PARTIAL || - indexInfo->ii_ExclusionOps != NULL) && - !satisfiesConstraint) - { - /* - * The tuple potentially violates the uniqueness or exclusion - * constraint, so make a note of the index so that we can re-check - * it later. Speculative inserters are told if there was a - * speculative conflict, since that always requires a restart. 
- */ + if (YbExecDoInsertIndexTuple(resultRelInfo, indexRelation, indexInfo, + slot, tuple, estate, noDupErr, + specConflict, arbiterIndexes)) result = lappend_oid(result, RelationGetRelid(indexRelation)); - if (indexRelation->rd_index->indimmediate && specConflict) - *specConflict = true; - } } return result; } +/* ---------------------------------------------------------------- + * YbExecDoDeleteIndexTuple + * + * This routine performs deletion of an index tuple of 'indexRelation' + * that is identified by a combination of the base table CTID ('ybctid') + * and data in the base table's tuple slot ('slot'). + * This routine has been refactored out of ExecDeleteIndexTuples so that + * Yugabyte's index update routine can invoke this routine directly without + * needing to duplicate a bunch of checks in ExecDeleteIndexTuples. + * This routine is currently only invoked by Yugabyte relations. + * ---------------------------------------------------------------- + */ +static void +YbExecDoDeleteIndexTuple(ResultRelInfo *resultRelInfo, + Relation indexRelation, + IndexInfo *indexInfo, + TupleTableSlot *slot, + Datum ybctid, + EState *estate) +{ + Datum values[INDEX_MAX_KEYS]; + bool isnull[INDEX_MAX_KEYS]; + /* + * FormIndexDatum fills in its values and isnull parameters with the + * appropriate values for the column(s) of the index. 
+ */ + FormIndexDatum(indexInfo, slot, estate, values, isnull); + + MemoryContext oldContext = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate)); + index_delete(indexRelation, /* index relation */ + values, /* array of index Datums */ + isnull, /* null flags */ + ybctid, /* ybctid */ + resultRelInfo->ri_RelationDesc, /* heap relation */ + indexInfo); /* index AM may need this */ + MemoryContextSwitchTo(oldContext); +} + /* ---------------------------------------------------------------- * ExecDeleteIndexTuples * @@ -521,8 +586,6 @@ ExecDeleteIndexTuplesOptimized(Datum ybctid, IndexInfo **indexInfoArray; ExprContext *econtext; TupleTableSlot *slot; - Datum values[INDEX_MAX_KEYS]; - bool isnull[INDEX_MAX_KEYS]; bool isYBRelation; /* @@ -558,15 +621,6 @@ ExecDeleteIndexTuplesOptimized(Datum ybctid, Relation indexRelation = relationDescs[i]; IndexInfo *indexInfo; - /* - * For an update command check if we need to skip index. - * For that purpose, we check if the relid of the index is part of the skip list. - */ - if (indexRelation == NULL || - list_member_oid(estate->yb_skip_entities.index_list, - RelationGetRelid(indexRelation))) - continue; - /* * No need to update YugaByte primary key which is intrinic part of * the base table. @@ -617,28 +671,308 @@ ExecDeleteIndexTuplesOptimized(Datum ybctid, continue; } + YbExecDoDeleteIndexTuple(resultRelInfo, indexRelation, indexInfo, + slot, ybctid, estate); + } + + /* Drop the temporary slot */ + ExecDropSingleTupleTableSlot(slot); +} + +static void +YbExecDoUpdateIndexTuple(ResultRelInfo *resultRelInfo, + Relation indexRelation, + IndexInfo *indexInfo, + TupleTableSlot *slot, + Datum oldYbctid, + Datum newYbctid, + EState *estate) +{ + Datum values[INDEX_MAX_KEYS]; + bool isnull[INDEX_MAX_KEYS]; + /* + * FormIndexDatum fills in its values and isnull parameters with the + * appropriate values for the column(s) of the index. 
+ */ + FormIndexDatum(indexInfo, slot, estate, values, isnull); + + MemoryContext oldContext = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate)); + yb_index_update(indexRelation, /* index relation */ + values, /* array of index Datums */ + isnull, /* null flags */ + oldYbctid, /* old ybctid */ + newYbctid, /* ybctid */ + resultRelInfo->ri_RelationDesc, /* heap relation */ + indexInfo); /* index AM may need this */ + MemoryContextSwitchTo(oldContext); +} + +List * +YbExecUpdateIndexTuples(TupleTableSlot *slot, + Datum ybctid, + HeapTuple oldtuple, + HeapTuple tuple, + EState *estate, + Bitmapset *updatedCols, + bool is_pk_updated) +{ + ResultRelInfo *resultRelInfo; + int i; + int numIndices; + RelationPtr relationDescs; + IndexInfo **indexInfoArray; + ExprContext *econtext; + TupleTableSlot *deleteSlot; + List *insertIndexes = NIL; /* A list of indexes whose tuples need to be reinserted */ + List *deleteIndexes = NIL; /* A list of indexes whose tuples need to be deleted */ + List *result = NIL; + Datum newYbctid = is_pk_updated ? + YBCGetYBTupleIdFromSlot(slot) : + (Datum) NULL; + + /* + * Get information from the result relation info structure. + */ + resultRelInfo = estate->es_result_relation_info; + numIndices = resultRelInfo->ri_NumIndices; + relationDescs = resultRelInfo->ri_IndexRelationDescs; + indexInfoArray = resultRelInfo->ri_IndexRelationInfo; + + Assert(IsYBRelation(resultRelInfo->ri_RelationDesc)); + + /* + * We will use the EState's per-tuple context for evaluating predicates + * and index expressions (creating it if it's not already there). + */ + econtext = GetPerTupleExprContext(estate); + + /* + * Arrange for econtext's scan tuple to be the tuple under test using + * a temporary slot. 
+ */ + deleteSlot = ExecStoreHeapTuple( + oldtuple, + MakeSingleTupleTableSlot( + RelationGetDescr(resultRelInfo->ri_RelationDesc)), + false); + + for (i = 0; i < numIndices; i++) + { + Relation indexRelation = relationDescs[i]; + IndexInfo *indexInfo; + const AttrNumber offset = + YBGetFirstLowInvalidAttributeNumber(resultRelInfo->ri_RelationDesc); + + /* + * For an update command check if we need to skip index. + * For that purpose, we check if the relid of the index is part of the + * skip list. + */ + if (indexRelation == NULL || + list_member_oid(estate->yb_skip_entities.index_list, + RelationGetRelid(indexRelation))) + continue; + + Form_pg_index indexData = indexRelation->rd_index; + /* + * Primary key is a part of the base relation in Yugabyte and does not + * need to be updated here. + */ + if (indexData->indisprimary) + continue; + + indexInfo = indexInfoArray[i]; + + /* + * Check for partial index - + * There are four different update scenarios for an index with a predicate: + * 1. Both the old and new tuples satisfy the predicate - In this case, the index tuple + * may either be updated in-place or deleted and reinserted depending on whether the + * key columns are modified. + * 2. Neither the old nor the new tuple satisfy the predicate - In this case, the + * update of this index can be skipped altogether. + * 3. The old tuple satisfies the predicate but the new tuple does not - In this case, + * the index tuple corresponding to the old tuple just needs to be deleted. + * 4. The old tuple does not satisfy the predicate but the new tuple does - In this case, + * a new index tuple corresponding to the new tuple needs to be inserted. 
+ */ + if (indexInfo->ii_Predicate != NIL) + { + ExprState *predicate = ExecPrepareQual(indexInfo->ii_Predicate, estate); + bool deleteApplicable = false; + bool insertApplicable = false; + + /* + * If predicate state not set up yet, create it (in the estate's + * per-query context) + */ + econtext->ecxt_scantuple = deleteSlot; + deleteApplicable = ExecQual(predicate, econtext); + + econtext->ecxt_scantuple = slot; + insertApplicable = ExecQual(predicate, econtext); + + if (deleteApplicable != insertApplicable) + { + /* + * Update is not possible as only one of (deletes, inserts) is + * applicable. Bail out of further checks. + */ + if (deleteApplicable) + deleteIndexes = lappend_int(deleteIndexes, i); + + if (insertApplicable) + insertIndexes = lappend_int(insertIndexes, i); + + continue; + } + + + if (!deleteApplicable) + { + /* Neither deletes nor updates applicable. Nothing to be done for this index. */ + continue; + } + + if (CheckUpdateExprOrPred(updatedCols, indexRelation, Anum_pg_index_indpred, offset)) + { + deleteIndexes = lappend_int(deleteIndexes, i); + insertIndexes = lappend_int(insertIndexes, i); + continue; + } + } + /* - * FormIndexDatum fills in its values and isnull parameters with the - * appropriate values for the column(s) of the index. + * Check if any of the columns associated with the expression index have + * been modified. This can be done without evaluating the expression + * itself. + * Note that an expression index can have other key columns in addition + * to the expression(s). That is, an expression index can be defined + * like so: + * CREATE INDEX ON table (f1(a, b), f2(b, c), d, e) INCLUDE (f, g, h); + * Such an index can be updated inplace, only if none of (a, b, c, d, e) + * have been modified. 
*/ - FormIndexDatum(indexInfo, - slot, - estate, - values, - isnull); + if (indexInfo->ii_Expressions != NIL) + { + if (CheckUpdateExprOrPred(updatedCols, indexRelation, Anum_pg_index_indexprs, offset)) + { + deleteIndexes = lappend_int(deleteIndexes, i); + insertIndexes = lappend_int(insertIndexes, i); + continue; + } + } - MemoryContext oldContext = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate)); - index_delete(indexRelation, /* index relation */ - values, /* array of index Datums */ - isnull, /* null flags */ - ybctid, /* ybctid */ - heapRelation, /* heap relation */ - indexInfo); /* index AM may need this */ - MemoryContextSwitchTo(oldContext); + if (!indexRelation->rd_amroutine->ybamcanupdatetupleinplace) + { + deleteIndexes = lappend_int(deleteIndexes, i); + insertIndexes = lappend_int(insertIndexes, i); + continue; + } + + /* + * In the following scenarios, the index tuple can be modified (updated) + * in-place, without the need to delete and reinsert the tuple: + * - The index is a covering index (number of key columns < number of columns in the index), + * only the non-key columns need to be updated, and the primary key is not updated. + * - The index is a unique index and only the non-key columns of the index need to be + * updated (irrespective of whether the primary key is updated). + */ + if ((indexData->indnkeyatts == indexData->indnatts || is_pk_updated) && + (!indexData->indisunique)) + { + deleteIndexes = lappend_int(deleteIndexes, i); + insertIndexes = lappend_int(insertIndexes, i); + continue; + } + + /* + * The index operations in this function get enqueued into a buffer. The + * buffer is flushed prematurely when there are two operations to the + * same row. This leads to additional roundtrips to the storage layer + * which can be avoided. A tuple is identified by a sequence of its key + * columns. 
In the case where the key columns are specified in an update + * query, but remain unmodified, the update is modeled as a + * DELETE + INSERT operation when the optimization to detect unmodified + * columns is disabled. In such cases, the DELETE and INSERT operations + * conflict with each other since the tuple's key columns remain unchanged. + * Consider the following example of a relation with four indexes that + * has columns C1, C2, C3 of a tuple modified (updated) by a query: + * Index I1 on (C1, C2) + * Index I2 on (C4, C5, C3) + * Index I3 on (C4, C5) INCLUDES (C1, C2) + * Index I4 on (C4) INCLUDES (C5) + * + * The order of operations should be: + * (1) Buffer UPDATE tuple of I3 + * (2) Buffer DELETE tuple of I1 + * (3) Buffer DELETE tuple of I2 + * (-) -- Flush -- + * (4) Buffer INSERT tuple of I1 + * (5) Buffer INSERT tuple of I2 + * (-) -- Flush -- + * Operations related to I4 are skipped altogether because none of the + * columns in I4 are updated. + * + * To achieve this, we compute the list of all indexes whose key columns + * are updated. These need the DELETE + INSERT. For all indexes, first + * issue the deletes, followed by the inserts. + */ + + int j = 0; + for (; j < indexData->indnkeyatts; j++) + { + const AttrNumber bms_idx = indexData->indkey.values[j] - offset; + if (bms_is_member(bms_idx, updatedCols)) + break; + } + + if (j < indexRelation->rd_index->indnkeyatts) + { + deleteIndexes = lappend_int(deleteIndexes, i); + insertIndexes = lappend_int(insertIndexes, i); + continue; + } + + /* + * This tuple updates only non-key columns of the index. This implies + * that the tuple will continue to satisfy all uniqueness and exclusion + * constraints on the index after the update. The index need not be + * rechecked. 
+ */ + econtext->ecxt_scantuple = slot; + YbExecDoUpdateIndexTuple(resultRelInfo, indexRelation, indexInfo, + slot, ybctid, newYbctid, estate); } - /* Drop the temporary slot */ - ExecDropSingleTupleTableSlot(slot); + ListCell *lc; + int index; + + econtext->ecxt_scantuple = deleteSlot; + foreach(lc, deleteIndexes) + { + index = lfirst_int(lc); + YbExecDoDeleteIndexTuple(resultRelInfo, relationDescs[index], + indexInfoArray[index], deleteSlot, ybctid, + estate); + } + + econtext->ecxt_scantuple = slot; + foreach(lc, insertIndexes) + { + index = lfirst_int(lc); + if (YbExecDoInsertIndexTuple(resultRelInfo, relationDescs[index], + indexInfoArray[index], slot, tuple, estate, + false /* noDupErr */, + NULL /* specConflict */, + NIL /* arbiterIndexes */)) + result = lappend_oid(result, RelationGetRelid(relationDescs[index])); + } + + /* Drop the temporary slots */ + ExecDropSingleTupleTableSlot(deleteSlot); + + return result; } /* ---------------------------------------------------------------- diff --git a/src/postgres/src/backend/executor/nodeModifyTable.c b/src/postgres/src/backend/executor/nodeModifyTable.c index f253093353bd..28ef808ef530 100644 --- a/src/postgres/src/backend/executor/nodeModifyTable.c +++ b/src/postgres/src/backend/executor/nodeModifyTable.c @@ -1562,14 +1562,9 @@ ExecUpdate(ModifyTableState *mtstate, if (YBCRelInfoHasSecondaryIndices(resultRelInfo) && mtstate->yb_fetch_target_tuple) { - Datum ybctid = YBCGetYBTupleIdFromSlot(planSlot); - - /* Delete index entries of the old tuple */ - ExecDeleteIndexTuplesOptimized(ybctid, oldtuple, estate); - - /* Insert new index entries for tuple */ - recheckIndexes = ExecInsertIndexTuplesOptimized( - slot, tuple, estate, false, NULL, NIL); + recheckIndexes = YbExecUpdateIndexTuples( + slot, YBCGetYBTupleIdFromSlot(planSlot), oldtuple, tuple, + estate, cols_marked_for_update, is_pk_updated); } bms_free(cols_marked_for_update); diff --git a/src/postgres/src/backend/executor/ybcModifyTable.c 
b/src/postgres/src/backend/executor/ybcModifyTable.c index 6c2ddedd30dc..9c351ebdffbb 100644 --- a/src/postgres/src/backend/executor/ybcModifyTable.c +++ b/src/postgres/src/backend/executor/ybcModifyTable.c @@ -1082,6 +1082,32 @@ bool YBCExecuteUpdate(Relation rel, return rows_affected_count > 0; } +void YBCExecuteUpdateIndex(Relation index, + Datum *values, + bool *isnull, + Datum oldYbctid, + Datum newYbctid, + yb_assign_for_write_function callback) +{ + Assert(index->rd_rel->relkind == RELKIND_INDEX); + + Oid dboid = YBCGetDatabaseOid(index); + YBCPgStatement update_stmt = NULL; + + /* Create the UPDATE request and add the values from the tuple. */ + HandleYBStatus(YBCPgNewUpdate(dboid, + YbGetRelfileNodeId(index), + YBCIsRegionLocal(index), + &update_stmt, + YB_TRANSACTIONAL)); + + callback(update_stmt, index, values, isnull, + RelationGetNumberOfAttributes(index), + oldYbctid, newYbctid); + + YBCApplyWriteStmt(update_stmt, index); +} + bool YBCExecuteUpdateLoginAttempts(Oid roleid, int failed_attempts, diff --git a/src/postgres/src/include/access/amapi.h b/src/postgres/src/include/access/amapi.h index 1ac877302f06..839ed85f55ea 100644 --- a/src/postgres/src/include/access/amapi.h +++ b/src/postgres/src/include/access/amapi.h @@ -100,6 +100,15 @@ typedef void (*yb_amdelete_function) (Relation indexRelation, Relation heapRelation, struct IndexInfo *indexInfo); +/* update the tuple identified by 'oldYbctid' for a Yugabyte-based index */ +typedef void (*yb_amupdate_function) (Relation indexRelation, + Datum *values, + bool *isnull, + Datum oldYbctid, + Datum newYbctid, + Relation heapRelation, + struct IndexInfo *indexInfo); + /* backfill this Yugabyte-based index */ typedef IndexBuildResult *(*yb_ambackfill_function) (Relation heapRelation, Relation indexRelation, @@ -241,6 +250,8 @@ typedef struct IndexAmRoutine bool amcanparallel; /* does AM support columns included with clause INCLUDE? 
*/ bool amcaninclude; + /* does AM support in-place update of non-key columns? */ + bool ybamcanupdatetupleinplace; /* type of data stored in index, or InvalidOid if variable */ Oid amkeytype; @@ -281,6 +292,7 @@ typedef struct IndexAmRoutine /* YB functions */ yb_aminsert_function yb_aminsert; yb_amdelete_function yb_amdelete; + yb_amupdate_function yb_amupdate; yb_ambackfill_function yb_ambackfill; yb_ammightrecheck_function yb_ammightrecheck; yb_amgetbitmap_function yb_amgetbitmap; diff --git a/src/postgres/src/include/access/genam.h b/src/postgres/src/include/access/genam.h index 75de9f244215..c008cb94385f 100644 --- a/src/postgres/src/include/access/genam.h +++ b/src/postgres/src/include/access/genam.h @@ -144,6 +144,11 @@ extern void index_delete(Relation indexRelation, Datum ybctid, Relation heapRelation, struct IndexInfo *indexInfo); +extern void yb_index_update(Relation indexRelation, + Datum *values, bool *isnull, + Datum oldYbctid, Datum newYbctid, + Relation heapRelation, + struct IndexInfo *indexInfo); extern IndexScanDesc index_beginscan(Relation heapRelation, Relation indexRelation, diff --git a/src/postgres/src/include/executor/executor.h b/src/postgres/src/include/executor/executor.h index 2997c3c5afad..907145b7083c 100644 --- a/src/postgres/src/include/executor/executor.h +++ b/src/postgres/src/include/executor/executor.h @@ -595,6 +595,14 @@ extern List *ExecInsertIndexTuplesOptimized(TupleTableSlot *slot, HeapTuple tupl extern void ExecDeleteIndexTuples(Datum ybctid, HeapTuple tuple, EState *estate); extern void ExecDeleteIndexTuplesOptimized(Datum ybctid, HeapTuple tuple, EState *estate); +extern List *YbExecUpdateIndexTuples(TupleTableSlot *slot, + Datum ybctid, + HeapTuple oldtuple, + HeapTuple tuple, + EState *estate, + Bitmapset *updatedCols, + bool is_pk_updated); + extern bool ExecCheckIndexConstraints(TupleTableSlot *slot, EState *estate, ItemPointer conflictTid, List *arbiterIndexes, TupleTableSlot **ybConflictSlot); diff --git 
a/src/postgres/src/include/executor/ybcModifyTable.h b/src/postgres/src/include/executor/ybcModifyTable.h index 7e24ee9367e3..6407489d4304 100644 --- a/src/postgres/src/include/executor/ybcModifyTable.h +++ b/src/postgres/src/include/executor/ybcModifyTable.h @@ -51,6 +51,14 @@ typedef void (*yb_bind_for_write_function) (YBCPgStatement stmt, Datum ybbasectid, bool ybctid_as_value); +typedef void (*yb_assign_for_write_function) (YBCPgStatement stmt, + Relation index, + Datum *values, + bool *isnull, + int natts, + Datum old_ybbasectid, + Datum new_ybbasectid); + /* * Insert data into YugaByte table. * This function is equivalent to "heap_insert", but it sends data to DocDB (YugaByte storage). @@ -145,6 +153,13 @@ extern void YBCExecuteDeleteIndex(Relation index, yb_bind_for_write_function callback, void *indexstate); +extern void YBCExecuteUpdateIndex(Relation index, + Datum *values, + bool *isnull, + Datum oldYbctid, + Datum newYbctid, + yb_assign_for_write_function callback); + /* * Update a row (identified by ybctid) in a YugaByte table. 
* If this is a single row op we will return false in the case that there was diff --git a/src/postgres/src/include/utils/relcache.h b/src/postgres/src/include/utils/relcache.h index 9e9551ef9e1c..7fbb93df649c 100644 --- a/src/postgres/src/include/utils/relcache.h +++ b/src/postgres/src/include/utils/relcache.h @@ -69,6 +69,10 @@ extern void YbComputeIndexExprOrPredicateAttrs(Bitmapset **indexattrs, AttrNumber attr_offset); extern bool CheckIndexForUpdate(Oid indexoid, const Bitmapset *updated_attrs, AttrNumber attr_offset); +extern bool CheckUpdateExprOrPred(const Bitmapset *updated_attrs, + Relation indexDesc, + const int Anum_pg_index, + AttrNumber attr_offset); extern void RelationGetExclusionInfo(Relation indexRelation, Oid **operators, diff --git a/src/postgres/src/test/regress/expected/yb_lock_status.out b/src/postgres/src/test/regress/expected/yb_lock_status.out index 818fe5fd734d..3962e56e0433 100644 --- a/src/postgres/src/test/regress/expected/yb_lock_status.out +++ b/src/postgres/src/test/regress/expected/yb_lock_status.out @@ -217,14 +217,12 @@ SELECT * FROM validate_and_return_lock_status('yb_lock_tests_k1_k2'::regclass, n locktype | relation | mode | granted | fastpath | valid_waitstart | valid_waitend | has_node | has_tablet_id | has_transaction_id | valid_subtransaction_id | has_status_tablet_id | is_explicit | hash_cols | range_cols | attnum | column_id | multiple_rows_locked | num_blocking ----------+---------------------+----------------------------+---------+----------+-----------------+---------------+----------+---------------+--------------------+-------------------------+----------------------+-------------+-----------+------------+--------+-----------+----------------------+-------------- relation | yb_lock_tests_k1_k2 | {WEAK_READ,WEAK_WRITE} | t | f | | t | t | t | t | t | f | f | | | | | t | - relation | yb_lock_tests_k1_k2 | {WEAK_READ,WEAK_WRITE} | t | f | | t | t | t | t | t | f | f | | | | | t | - keyrange | yb_lock_tests_k1_k2 | 
{WEAK_READ,WEAK_WRITE} | t | f | | t | t | t | t | t | f | f | {1} | | | | t | keyrange | yb_lock_tests_k1_k2 | {WEAK_READ,WEAK_WRITE} | t | f | | t | t | t | t | t | f | f | {1} | | | | t | keyrange | yb_lock_tests_k1_k2 | {WEAK_READ,WEAK_WRITE} | t | f | | t | t | t | t | t | f | f | {1} | {1} | | | t | - keyrange | yb_lock_tests_k1_k2 | {WEAK_READ,WEAK_WRITE} | t | f | | t | t | t | t | t | f | f | {1} | {1} | | | t | - row | yb_lock_tests_k1_k2 | {STRONG_READ,STRONG_WRITE} | t | f | | t | t | t | t | t | f | f | {1} | {1,"null"} | | | f | - row | yb_lock_tests_k1_k2 | {STRONG_READ,STRONG_WRITE} | t | f | | t | t | t | t | t | f | f | {1} | {1,"null"} | | | f | -(8 rows) + row | yb_lock_tests_k1_k2 | {WEAK_READ,WEAK_WRITE} | t | f | | t | t | t | t | t | f | f | {1} | {1,"null"} | | | f | + row | yb_lock_tests_k1_k2 | {STRONG_READ} | t | f | | t | t | t | t | t | f | f | {1} | {1,"null"} | | | f | + column | yb_lock_tests_k1_k2 | {STRONG_READ,STRONG_WRITE} | t | f | | t | t | t | t | t | f | f | {1} | {1,"null"} | | 3 | f | +(6 rows) ABORT; BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; diff --git a/src/postgres/src/test/regress/expected/yb_lock_status_1.out b/src/postgres/src/test/regress/expected/yb_lock_status_1.out index 375cf52363de..60ec56750bea 100644 --- a/src/postgres/src/test/regress/expected/yb_lock_status_1.out +++ b/src/postgres/src/test/regress/expected/yb_lock_status_1.out @@ -224,16 +224,12 @@ SELECT * FROM validate_and_return_lock_status('yb_lock_tests_k1_k2'::regclass, n locktype | relation | mode | granted | fastpath | valid_waitstart | valid_waitend | has_node | has_tablet_id | has_transaction_id | valid_subtransaction_id | has_status_tablet_id | is_explicit | hash_cols | range_cols | attnum | column_id | multiple_rows_locked | num_blocking 
----------+---------------------+----------------------------+---------+----------+-----------------+---------------+----------+---------------+--------------------+-------------------------+----------------------+-------------+-----------+------------+--------+-----------+----------------------+-------------- relation | yb_lock_tests_k1_k2 | {WEAK_READ,WEAK_WRITE} | t | f | | t | t | t | t | t | f | f | | | | | t | - relation | yb_lock_tests_k1_k2 | {WEAK_READ,WEAK_WRITE} | t | f | | t | t | t | t | t | f | f | | | | | t | - keyrange | yb_lock_tests_k1_k2 | {WEAK_READ,WEAK_WRITE} | t | f | | t | t | t | t | t | f | f | {1} | | | | t | keyrange | yb_lock_tests_k1_k2 | {WEAK_READ,WEAK_WRITE} | t | f | | t | t | t | t | t | f | f | {1} | | | | t | keyrange | yb_lock_tests_k1_k2 | {WEAK_READ,WEAK_WRITE} | t | f | | t | t | t | t | t | f | f | {1} | {1} | | | t | - keyrange | yb_lock_tests_k1_k2 | {WEAK_READ,WEAK_WRITE} | t | f | | t | t | t | t | t | f | f | {1} | {1} | | | t | row | yb_lock_tests_k1_k2 | {WEAK_READ,WEAK_WRITE} | t | f | | t | t | t | t | t | f | f | {1} | {1,"null"} | | | f | - row | yb_lock_tests_k1_k2 | {STRONG_READ,STRONG_WRITE} | t | f | | t | t | t | t | t | f | f | {1} | {1,"null"} | | | f | - column | yb_lock_tests_k1_k2 | {STRONG_READ,STRONG_WRITE} | t | f | | t | t | t | t | t | f | f | {1} | {1,"null"} | | 0 | f | + row | yb_lock_tests_k1_k2 | {STRONG_READ} | t | f | | t | t | t | t | t | f | f | {1} | {1,"null"} | | | f | column | yb_lock_tests_k1_k2 | {STRONG_READ,STRONG_WRITE} | t | f | | t | t | t | t | t | f | f | {1} | {1,"null"} | | 3 | f | -(10 rows) +(6 rows) ABORT; BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; diff --git a/src/postgres/src/test/regress/expected/yb_lock_status_2.out b/src/postgres/src/test/regress/expected/yb_lock_status_2.out index 80d8f1a753d4..4f2e164afdb2 100644 --- a/src/postgres/src/test/regress/expected/yb_lock_status_2.out +++ b/src/postgres/src/test/regress/expected/yb_lock_status_2.out @@ -224,16 
+224,12 @@ SELECT * FROM validate_and_return_lock_status('yb_lock_tests_k1_k2'::regclass, n locktype | relation | mode | granted | fastpath | valid_waitstart | valid_waitend | has_node | has_tablet_id | has_transaction_id | valid_subtransaction_id | has_status_tablet_id | is_explicit | hash_cols | range_cols | attnum | column_id | multiple_rows_locked | num_blocking ----------+---------------------+----------------------------+---------+----------+-----------------+---------------+----------+---------------+--------------------+-------------------------+----------------------+-------------+-----------+------------+--------+-----------+----------------------+-------------- relation | yb_lock_tests_k1_k2 | {WEAK_READ,WEAK_WRITE} | t | f | | t | t | t | t | t | f | f | | | | | t | - relation | yb_lock_tests_k1_k2 | {WEAK_READ,WEAK_WRITE} | t | f | | t | t | t | t | t | f | f | | | | | t | - keyrange | yb_lock_tests_k1_k2 | {WEAK_READ,WEAK_WRITE} | t | f | | t | t | t | t | t | f | f | {1} | | | | t | keyrange | yb_lock_tests_k1_k2 | {WEAK_READ,WEAK_WRITE} | t | f | | t | t | t | t | t | f | f | {1} | | | | t | keyrange | yb_lock_tests_k1_k2 | {WEAK_READ,WEAK_WRITE} | t | f | | t | t | t | t | t | f | f | {1} | {1} | | | t | - keyrange | yb_lock_tests_k1_k2 | {WEAK_READ,WEAK_WRITE} | t | f | | t | t | t | t | t | f | f | {1} | {1} | | | t | row | yb_lock_tests_k1_k2 | {WEAK_READ,WEAK_WRITE} | t | f | | t | t | t | t | t | f | f | {1} | {1,"null"} | | | f | - row | yb_lock_tests_k1_k2 | {STRONG_READ,STRONG_WRITE} | t | f | | t | t | t | t | t | f | f | {1} | {1,"null"} | | | f | - column | yb_lock_tests_k1_k2 | {STRONG_READ,STRONG_WRITE} | t | f | | t | t | t | t | t | f | f | {1} | {1,"null"} | | 0 | f | + row | yb_lock_tests_k1_k2 | {STRONG_READ} | t | f | | t | t | t | t | t | f | f | {1} | {1,"null"} | | | f | column | yb_lock_tests_k1_k2 | {STRONG_READ,STRONG_WRITE} | t | f | | t | t | t | t | t | f | f | {1} | {1,"null"} | | 13 | f | -(10 rows) +(6 rows) ABORT; 
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; diff --git a/src/postgres/src/test/regress/expected/yb_update_optimize_index_updates.out b/src/postgres/src/test/regress/expected/yb_update_optimize_index_updates.out new file mode 100644 index 000000000000..fcddc8ef863a --- /dev/null +++ b/src/postgres/src/test/regress/expected/yb_update_optimize_index_updates.out @@ -0,0 +1,1712 @@ +SET yb_fetch_row_limit TO 1024; +SET yb_explain_hide_non_deterministic_fields TO true; +-- +-- Tests to validate index updates in a table with no primary key. +-- +DROP TABLE IF EXISTS no_pkey_table; +NOTICE: table "no_pkey_table" does not exist, skipping +CREATE TABLE no_pkey_table (v1 INT, v2 INT, v3 INT, v4 INT); +CREATE INDEX NONCONCURRENTLY no_pkey_v1 ON no_pkey_table (v1 HASH); +CREATE INDEX NONCONCURRENTLY no_pkey_v2_hash_v3 ON no_pkey_table (v2 HASH) INCLUDE (v3); +CREATE INDEX NONCONCURRENTLY no_pkey_v2_range_v3 ON no_pkey_table (v2 ASC) INCLUDE (v3); +INSERT INTO no_pkey_table (SELECT i, i, i, i FROM generate_series(1, 10) AS i); +-- Updating a column with no indexes should not require index writes +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE no_pkey_table SET v4 = v4 + 1 WHERE v1 = 1; + QUERY PLAN +---------------------------------------------------------------------------- + Update on no_pkey_table (actual rows=0 loops=1) + -> Index Scan using no_pkey_v1 on no_pkey_table (actual rows=1 loops=1) + Index Cond: (v1 = 1) + Storage Table Read Requests: 1 + Storage Table Rows Scanned: 1 + Storage Index Read Requests: 1 + Storage Index Rows Scanned: 1 + Storage Table Write Requests: 1 + Storage Read Requests: 2 + Storage Rows Scanned: 2 + Storage Write Requests: 1 + Storage Flush Requests: 1 +(12 rows) + +-- Updating the key columns of an index should require a DELETE + INSERT +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE no_pkey_table SET v2 = v2 + 1 WHERE v1 = 1; + QUERY PLAN +---------------------------------------------------------------------------- + Update on no_pkey_table 
(actual rows=0 loops=1) + -> Index Scan using no_pkey_v1 on no_pkey_table (actual rows=1 loops=1) + Index Cond: (v1 = 1) + Storage Table Read Requests: 1 + Storage Table Rows Scanned: 1 + Storage Index Read Requests: 1 + Storage Index Rows Scanned: 1 + Storage Table Write Requests: 1 + Storage Index Write Requests: 4 + Storage Read Requests: 2 + Storage Rows Scanned: 2 + Storage Write Requests: 5 + Storage Flush Requests: 1 +(13 rows) + +-- Updating non-key columns of an index should only require an UPDATE +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE no_pkey_table SET v3 = v3 + 1 WHERE v1 = 1; + QUERY PLAN +---------------------------------------------------------------------------- + Update on no_pkey_table (actual rows=0 loops=1) + -> Index Scan using no_pkey_v1 on no_pkey_table (actual rows=1 loops=1) + Index Cond: (v1 = 1) + Storage Table Read Requests: 1 + Storage Table Rows Scanned: 1 + Storage Index Read Requests: 1 + Storage Index Rows Scanned: 1 + Storage Table Write Requests: 1 + Storage Index Write Requests: 2 + Storage Read Requests: 2 + Storage Rows Scanned: 2 + Storage Write Requests: 3 + Storage Flush Requests: 1 +(13 rows) + +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE no_pkey_table SET v3 = v3 + 1, v4 = v4 + 1 WHERE v1 = 1; + QUERY PLAN +---------------------------------------------------------------------------- + Update on no_pkey_table (actual rows=0 loops=1) + -> Index Scan using no_pkey_v1 on no_pkey_table (actual rows=1 loops=1) + Index Cond: (v1 = 1) + Storage Table Read Requests: 1 + Storage Table Rows Scanned: 1 + Storage Index Read Requests: 1 + Storage Index Rows Scanned: 1 + Storage Table Write Requests: 1 + Storage Index Write Requests: 2 + Storage Read Requests: 2 + Storage Rows Scanned: 2 + Storage Write Requests: 3 + Storage Flush Requests: 1 +(13 rows) + +-- Updating a mix of key and non-key columns of an index should require a DELETE + INSERT +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE no_pkey_table SET v2 = v2 + 1, v3 = v3 + 1 WHERE 
v1 = 1; + QUERY PLAN +---------------------------------------------------------------------------- + Update on no_pkey_table (actual rows=0 loops=1) + -> Index Scan using no_pkey_v1 on no_pkey_table (actual rows=1 loops=1) + Index Cond: (v1 = 1) + Storage Table Read Requests: 1 + Storage Table Rows Scanned: 1 + Storage Index Read Requests: 1 + Storage Index Rows Scanned: 1 + Storage Table Write Requests: 1 + Storage Index Write Requests: 4 + Storage Read Requests: 2 + Storage Rows Scanned: 2 + Storage Write Requests: 5 + Storage Flush Requests: 1 +(13 rows) + +-- Validate the updates above using both SeqScan and IndexOnlyScan +/*+ SeqScan(no_pkey_table) */ SELECT * FROM no_pkey_table WHERE v1 = 1 ORDER BY v1; + v1 | v2 | v3 | v4 +----+----+----+---- + 1 | 3 | 4 | 3 +(1 row) + +/*+ IndexOnlyScan(no_pkey_table no_pkey_v2_hash_v3) */ SELECT v2, v3 FROM no_pkey_table WHERE v2 = 3 ORDER BY (v2, v3); + v2 | v3 +----+---- + 3 | 3 + 3 | 4 +(2 rows) + +/*+ IndexOnlyScan(no_pkey_table no_pkey_v2_range_v3) */ SELECT v2, v3 FROM no_pkey_table WHERE v2 = 3 ORDER BY (v2, v3); + v2 | v3 +----+---- + 3 | 3 + 3 | 4 +(2 rows) + +DROP TABLE IF EXISTS t_simple; +NOTICE: table "t_simple" does not exist, skipping +CREATE TABLE t_simple (k1 INT, k2 INT NULL, v1 INT, v2 INT, v3 INT, v4 INT, PRIMARY KEY (k1, k2)); +INSERT INTO t_simple (SELECT i, i, i, i, i, i FROM generate_series(1, 10) AS i); +-- +-- Vanilla tests to validate index updates in a table with a primary key. 
+-- +CREATE INDEX NONCONCURRENTLY simple_v1 ON t_simple (v1 HASH); +CREATE INDEX NONCONCURRENTLY simple_v2_hash_v3 ON t_simple (v2 HASH) INCLUDE (v3); +CREATE INDEX NONCONCURRENTLY simple_v2_range_v3 ON t_simple (v2 ASC) INCLUDE (v3); +-- Updating a column with no indexes should not require index writes +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v4 = v4 + 1 WHERE k1 = 1; + QUERY PLAN +-------------------------------------------------------------------------- + Update on t_simple (actual rows=0 loops=1) + -> Index Scan using t_simple_pkey on t_simple (actual rows=1 loops=1) + Index Cond: (k1 = 1) + Storage Table Read Requests: 1 + Storage Table Rows Scanned: 1 + Storage Table Write Requests: 1 + Storage Read Requests: 1 + Storage Rows Scanned: 1 + Storage Write Requests: 1 + Storage Flush Requests: 1 +(10 rows) + +-- Updating the key columns of an index should require a DELETE + INSERT +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v2 = v2 + 1 WHERE k1 = 1; + QUERY PLAN +-------------------------------------------------------------------------- + Update on t_simple (actual rows=0 loops=1) + -> Index Scan using t_simple_pkey on t_simple (actual rows=1 loops=1) + Index Cond: (k1 = 1) + Storage Table Read Requests: 1 + Storage Table Rows Scanned: 1 + Storage Table Write Requests: 1 + Storage Index Write Requests: 4 + Storage Read Requests: 1 + Storage Rows Scanned: 1 + Storage Write Requests: 5 + Storage Flush Requests: 1 +(11 rows) + +-- Updating non-key columns of an index should only require an UPDATE +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v3 = v3 + 1 WHERE k1 = 1; + QUERY PLAN +-------------------------------------------------------------------------- + Update on t_simple (actual rows=0 loops=1) + -> Index Scan using t_simple_pkey on t_simple (actual rows=1 loops=1) + Index Cond: (k1 = 1) + Storage Table Read Requests: 1 + Storage Table Rows Scanned: 1 + Storage Table Write Requests: 1 + Storage Index Write Requests: 2 + 
Storage Read Requests: 1 + Storage Rows Scanned: 1 + Storage Write Requests: 3 + Storage Flush Requests: 1 +(11 rows) + +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v3 = v3 + 1, v4 = v4 + 1 WHERE k1 = 1; + QUERY PLAN +-------------------------------------------------------------------------- + Update on t_simple (actual rows=0 loops=1) + -> Index Scan using t_simple_pkey on t_simple (actual rows=1 loops=1) + Index Cond: (k1 = 1) + Storage Table Read Requests: 1 + Storage Table Rows Scanned: 1 + Storage Table Write Requests: 1 + Storage Index Write Requests: 2 + Storage Read Requests: 1 + Storage Rows Scanned: 1 + Storage Write Requests: 3 + Storage Flush Requests: 1 +(11 rows) + +-- Updating a mix of key and non-key columns of an index should require a DELETE + INSERT +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v2 = v2 + 1, v3 = v3 + 1 WHERE k1 = 1; + QUERY PLAN +-------------------------------------------------------------------------- + Update on t_simple (actual rows=0 loops=1) + -> Index Scan using t_simple_pkey on t_simple (actual rows=1 loops=1) + Index Cond: (k1 = 1) + Storage Table Read Requests: 1 + Storage Table Rows Scanned: 1 + Storage Table Write Requests: 1 + Storage Index Write Requests: 4 + Storage Read Requests: 1 + Storage Rows Scanned: 1 + Storage Write Requests: 5 + Storage Flush Requests: 1 +(11 rows) + +-- Validate the updates above using both SeqScan and IndexOnlyScan +/*+ SeqScan(t_simple) */ SELECT * FROM t_simple WHERE k1 = 1 ORDER BY (k1, k2); + k1 | k2 | v1 | v2 | v3 | v4 +----+----+----+----+----+---- + 1 | 1 | 1 | 3 | 4 | 3 +(1 row) + +/*+ IndexOnlyScan(t_simple simple_v2_hash_v3) */ SELECT v2, v3 FROM t_simple WHERE v2 = 3 ORDER BY (v2, v3); + v2 | v3 +----+---- + 3 | 3 + 3 | 4 +(2 rows) + +/*+ IndexOnlyScan(t_simple simple_v2_range_v3) */ SELECT v2, v3 FROM t_simple WHERE v2 = 3 ORDER BY (v2, v3); + v2 | v3 +----+---- + 3 | 3 + 3 | 4 +(2 rows) + +-- Updating the primary key columns should require a DELETE + 
INSERT on the main table +-- as well as on non-unique indexes. +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET k1 = k1 + 10, v3 = v3 + 1 WHERE k1 = 1; + QUERY PLAN +-------------------------------------------------------------------------- + Update on t_simple (actual rows=0 loops=1) + -> Index Scan using t_simple_pkey on t_simple (actual rows=1 loops=1) + Index Cond: (k1 = 1) + Storage Table Read Requests: 1 + Storage Table Rows Scanned: 1 + Storage Table Write Requests: 2 + Storage Index Write Requests: 6 + Storage Read Requests: 1 + Storage Rows Scanned: 1 + Storage Write Requests: 8 + Storage Flush Requests: 1 +(11 rows) + +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET k2 = k2 + 10, v3 = v3 + 1 WHERE k1 = 11; + QUERY PLAN +-------------------------------------------------------------------------- + Update on t_simple (actual rows=0 loops=1) + -> Index Scan using t_simple_pkey on t_simple (actual rows=1 loops=1) + Index Cond: (k1 = 11) + Storage Table Read Requests: 1 + Storage Table Rows Scanned: 1 + Storage Table Write Requests: 2 + Storage Index Write Requests: 6 + Storage Read Requests: 1 + Storage Rows Scanned: 1 + Storage Write Requests: 8 + Storage Flush Requests: 1 +(11 rows) + +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET k1 = 12, k2 = 22, v3 = v3 + 1 WHERE k1 < 3; + QUERY PLAN +---------------------------------------------------- + Update on t_simple (actual rows=0 loops=1) + -> Seq Scan on t_simple (actual rows=1 loops=1) + Storage Filter: (k1 < 3) + Storage Table Read Requests: 1 + Storage Table Rows Scanned: 10 + Storage Table Write Requests: 2 + Storage Index Write Requests: 6 + Storage Read Requests: 1 + Storage Rows Scanned: 10 + Storage Write Requests: 8 + Storage Flush Requests: 1 +(11 rows) + +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET k1 = k1 + 10, k2 = k2 + 10, v2 = v2 + 1 WHERE k2 = 3; + QUERY PLAN +---------------------------------------------------- + Update on t_simple (actual rows=0 loops=1) + -> 
Seq Scan on t_simple (actual rows=1 loops=1) + Storage Filter: (k2 = 3) + Storage Table Read Requests: 1 + Storage Table Rows Scanned: 10 + Storage Table Write Requests: 2 + Storage Index Write Requests: 6 + Storage Read Requests: 1 + Storage Rows Scanned: 10 + Storage Write Requests: 8 + Storage Flush Requests: 1 +(11 rows) + +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET k1 = k1 + 10, k2 = k2 + 10, v2 = v2 + 1, v3 = v3 + 1 WHERE k2 = 4; + QUERY PLAN +---------------------------------------------------- + Update on t_simple (actual rows=0 loops=1) + -> Seq Scan on t_simple (actual rows=1 loops=1) + Storage Filter: (k2 = 4) + Storage Table Read Requests: 1 + Storage Table Rows Scanned: 10 + Storage Table Write Requests: 2 + Storage Index Write Requests: 6 + Storage Read Requests: 1 + Storage Rows Scanned: 10 + Storage Write Requests: 8 + Storage Flush Requests: 1 +(11 rows) + +/*+ SeqScan(t_simple) */ SELECT * FROM t_simple WHERE k1 > 10 ORDER BY (k1, k2); + k1 | k2 | v1 | v2 | v3 | v4 +----+----+----+----+----+---- + 11 | 11 | 1 | 3 | 6 | 3 + 12 | 22 | 2 | 2 | 3 | 2 + 13 | 13 | 3 | 4 | 3 | 3 + 14 | 14 | 4 | 5 | 5 | 4 +(4 rows) + +/*+ IndexOnlyScan(t_simple simple_v2_hash_v3) */ SELECT v2, v3 FROM t_simple WHERE v2 <= 5 ORDER BY (v2, v3); + v2 | v3 +----+---- + 2 | 3 + 3 | 6 + 4 | 3 + 5 | 5 + 5 | 5 +(5 rows) + +/*+ IndexOnlyScan(t_simple simple_v2_range_v3) */ SELECT v2, v3 FROM t_simple WHERE v2 <= 5 ORDER BY (v2, v3); + v2 | v3 +----+---- + 2 | 3 + 3 | 6 + 4 | 3 + 5 | 5 + 5 | 5 +(5 rows) + +/*+ IndexScan(simple_v2_hash_v3) */ SELECT * FROM t_simple WHERE v2 <= 5 ORDER BY (v2, v3); + k1 | k2 | v1 | v2 | v3 | v4 +----+----+----+----+----+---- + 12 | 22 | 2 | 2 | 3 | 2 + 11 | 11 | 1 | 3 | 6 | 3 + 13 | 13 | 3 | 4 | 3 | 3 + 5 | 5 | 5 | 5 | 5 | 5 + 14 | 14 | 4 | 5 | 5 | 4 +(5 rows) + +/*+ IndexScan(simple_v2_range_v3) */ SELECT * FROM t_simple WHERE v2 <= 5 ORDER BY (v2, v3); + k1 | k2 | v1 | v2 | v3 | v4 +----+----+----+----+----+---- + 12 | 22 | 2 | 2 | 3 | 
2 + 11 | 11 | 1 | 3 | 6 | 3 + 13 | 13 | 3 | 4 | 3 | 3 + 5 | 5 | 5 | 5 | 5 | 5 + 14 | 14 | 4 | 5 | 5 | 4 +(5 rows) + +DROP INDEX simple_v1; +DROP INDEX simple_v2_hash_v3; +DROP INDEX simple_v2_range_v3; +-- +-- Vanilla tests to validate index updates in unique indexes. +-- +TRUNCATE t_simple; +INSERT INTO t_simple (SELECT i, i, i, i, i, i FROM generate_series(1, 10) AS i); +CREATE UNIQUE INDEX NONCONCURRENTLY simple_v2_hash_v3 ON t_simple (v2 HASH) INCLUDE (v3); +CREATE UNIQUE INDEX NONCONCURRENTLY simple_v2_range_v3 ON t_simple (v2 ASC) INCLUDE (v3); +-- Updating the key columns of a unique index should require a DELETE + INSERT +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v2 = v2 + 10 WHERE k1 = 1; + QUERY PLAN +-------------------------------------------------------------------------- + Update on t_simple (actual rows=0 loops=1) + -> Index Scan using t_simple_pkey on t_simple (actual rows=1 loops=1) + Index Cond: (k1 = 1) + Storage Table Read Requests: 1 + Storage Table Rows Scanned: 1 + Storage Table Write Requests: 1 + Storage Index Write Requests: 4 + Storage Read Requests: 1 + Storage Rows Scanned: 1 + Storage Write Requests: 5 + Storage Flush Requests: 1 +(11 rows) + +-- Updating non-key columns of an index should only require an UPDATE +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v3 = v3 + 1 WHERE k1 = 1; + QUERY PLAN +-------------------------------------------------------------------------- + Update on t_simple (actual rows=0 loops=1) + -> Index Scan using t_simple_pkey on t_simple (actual rows=1 loops=1) + Index Cond: (k1 = 1) + Storage Table Read Requests: 1 + Storage Table Rows Scanned: 1 + Storage Table Write Requests: 1 + Storage Index Write Requests: 2 + Storage Read Requests: 1 + Storage Rows Scanned: 1 + Storage Write Requests: 3 + Storage Flush Requests: 1 +(11 rows) + +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v3 = v3 + 1, v4 = v4 + 1 WHERE k1 = 1; + QUERY PLAN 
+-------------------------------------------------------------------------- + Update on t_simple (actual rows=0 loops=1) + -> Index Scan using t_simple_pkey on t_simple (actual rows=1 loops=1) + Index Cond: (k1 = 1) + Storage Table Read Requests: 1 + Storage Table Rows Scanned: 1 + Storage Table Write Requests: 1 + Storage Index Write Requests: 2 + Storage Read Requests: 1 + Storage Rows Scanned: 1 + Storage Write Requests: 3 + Storage Flush Requests: 1 +(11 rows) + +-- Updating a mix of key and non-key columns of an index should require a DELETE + INSERT +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v2 = v2 + 10, v3 = v3 + 1 WHERE k1 = 1; + QUERY PLAN +-------------------------------------------------------------------------- + Update on t_simple (actual rows=0 loops=1) + -> Index Scan using t_simple_pkey on t_simple (actual rows=1 loops=1) + Index Cond: (k1 = 1) + Storage Table Read Requests: 1 + Storage Table Rows Scanned: 1 + Storage Table Write Requests: 1 + Storage Index Write Requests: 4 + Storage Read Requests: 1 + Storage Rows Scanned: 1 + Storage Write Requests: 5 + Storage Flush Requests: 1 +(11 rows) + +-- Updating the primary key columns should require a DELETE + INSERT on the main table +-- but only an UPDATE on a non-unique index. 
+EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET k1 = k1 + 10, v3 = v3 + 1 WHERE k1 = 1; + QUERY PLAN +-------------------------------------------------------------------------- + Update on t_simple (actual rows=0 loops=1) + -> Index Scan using t_simple_pkey on t_simple (actual rows=1 loops=1) + Index Cond: (k1 = 1) + Storage Table Read Requests: 1 + Storage Table Rows Scanned: 1 + Storage Table Write Requests: 2 + Storage Index Write Requests: 2 + Storage Read Requests: 1 + Storage Rows Scanned: 1 + Storage Write Requests: 4 + Storage Flush Requests: 1 +(11 rows) + +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET k2 = k2 + 10, v3 = v3 + 1 WHERE k1 = 11; + QUERY PLAN +-------------------------------------------------------------------------- + Update on t_simple (actual rows=0 loops=1) + -> Index Scan using t_simple_pkey on t_simple (actual rows=1 loops=1) + Index Cond: (k1 = 11) + Storage Table Read Requests: 1 + Storage Table Rows Scanned: 1 + Storage Table Write Requests: 2 + Storage Index Write Requests: 2 + Storage Read Requests: 1 + Storage Rows Scanned: 1 + Storage Write Requests: 4 + Storage Flush Requests: 1 +(11 rows) + +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET k1 = 12, k2 = 22, v3 = v3 + 1 WHERE k1 < 3; + QUERY PLAN +---------------------------------------------------- + Update on t_simple (actual rows=0 loops=1) + -> Seq Scan on t_simple (actual rows=1 loops=1) + Storage Filter: (k1 < 3) + Storage Table Read Requests: 1 + Storage Table Rows Scanned: 10 + Storage Table Write Requests: 2 + Storage Index Write Requests: 2 + Storage Read Requests: 1 + Storage Rows Scanned: 10 + Storage Write Requests: 4 + Storage Flush Requests: 1 +(11 rows) + +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET k1 = k1 + 10, k2 = k2 + 10, v2 = v2 + 10 WHERE k2 = 3; + QUERY PLAN +---------------------------------------------------- + Update on t_simple (actual rows=0 loops=1) + -> Seq Scan on t_simple (actual rows=1 loops=1) + Storage Filter: 
(k2 = 3) + Storage Table Read Requests: 1 + Storage Table Rows Scanned: 10 + Storage Table Write Requests: 2 + Storage Index Write Requests: 4 + Storage Read Requests: 1 + Storage Rows Scanned: 10 + Storage Write Requests: 6 + Storage Flush Requests: 1 +(11 rows) + +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET k1 = k1 + 10, k2 = k2 + 10, v2 = v2 + 10, v3 = v3 + 1 WHERE k2 = 4; + QUERY PLAN +---------------------------------------------------- + Update on t_simple (actual rows=0 loops=1) + -> Seq Scan on t_simple (actual rows=1 loops=1) + Storage Filter: (k2 = 4) + Storage Table Read Requests: 1 + Storage Table Rows Scanned: 10 + Storage Table Write Requests: 2 + Storage Index Write Requests: 4 + Storage Read Requests: 1 + Storage Rows Scanned: 10 + Storage Write Requests: 6 + Storage Flush Requests: 1 +(11 rows) + +/*+ SeqScan(t_simple) */ SELECT * FROM t_simple WHERE k1 > 10 ORDER BY (k1, k2); + k1 | k2 | v1 | v2 | v3 | v4 +----+----+----+----+----+---- + 11 | 11 | 1 | 21 | 6 | 2 + 12 | 22 | 2 | 2 | 3 | 2 + 13 | 13 | 3 | 13 | 3 | 3 + 14 | 14 | 4 | 14 | 5 | 4 +(4 rows) + +/*+ IndexOnlyScan(t_simple simple_v2_hash_v3) */ SELECT v2, v3 FROM t_simple WHERE v2 <= 5 ORDER BY (v2, v3); + v2 | v3 +----+---- + 2 | 3 + 5 | 5 +(2 rows) + +/*+ IndexOnlyScan(t_simple simple_v2_range_v3) */ SELECT v2, v3 FROM t_simple WHERE v2 <= 5 ORDER BY (v2, v3); + v2 | v3 +----+---- + 2 | 3 + 5 | 5 +(2 rows) + +/*+ IndexScan(simple_v2_hash_v3) */ SELECT * FROM t_simple WHERE v2 <= 5 ORDER BY (v2, v3); + k1 | k2 | v1 | v2 | v3 | v4 +----+----+----+----+----+---- + 12 | 22 | 2 | 2 | 3 | 2 + 5 | 5 | 5 | 5 | 5 | 5 +(2 rows) + +/*+ IndexScan(simple_v2_range_v3) */ SELECT * FROM t_simple WHERE v2 <= 5 ORDER BY (v2, v3); + k1 | k2 | v1 | v2 | v3 | v4 +----+----+----+----+----+---- + 12 | 22 | 2 | 2 | 3 | 2 + 5 | 5 | 5 | 5 | 5 | 5 +(2 rows) + +DROP INDEX simple_v2_hash_v3; +DROP INDEX simple_v2_range_v3; +-- +-- Tests to validate multi-column index update behavior +-- +TRUNCATE t_simple; 
+INSERT INTO t_simple (SELECT i, i, i, i, i, i FROM generate_series(1, 10) AS i); +CREATE INDEX NONCONCURRENTLY simple_v1_v2_v3 ON t_simple ((v1, v2) HASH) INCLUDE (v3); +-- Updating any of the key columns of the non-unique index should require a DELETE + INSERT +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v1 = v1 + 10, v2 = v2 + 10 WHERE k1 = 1; + QUERY PLAN +-------------------------------------------------------------------------- + Update on t_simple (actual rows=0 loops=1) + -> Index Scan using t_simple_pkey on t_simple (actual rows=1 loops=1) + Index Cond: (k1 = 1) + Storage Table Read Requests: 1 + Storage Table Rows Scanned: 1 + Storage Table Write Requests: 1 + Storage Index Write Requests: 2 + Storage Read Requests: 1 + Storage Rows Scanned: 1 + Storage Write Requests: 3 + Storage Flush Requests: 1 +(11 rows) + +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v1 = v1 + 10, v3 = v3 + 1 WHERE k1 = 2; + QUERY PLAN +-------------------------------------------------------------------------- + Update on t_simple (actual rows=0 loops=1) + -> Index Scan using t_simple_pkey on t_simple (actual rows=1 loops=1) + Index Cond: (k1 = 2) + Storage Table Read Requests: 1 + Storage Table Rows Scanned: 1 + Storage Table Write Requests: 1 + Storage Index Write Requests: 2 + Storage Read Requests: 1 + Storage Rows Scanned: 1 + Storage Write Requests: 3 + Storage Flush Requests: 1 +(11 rows) + +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v2 = v1 + 10, v3 = v3 + 1 WHERE k1 = 3; + QUERY PLAN +-------------------------------------------------------------------------- + Update on t_simple (actual rows=0 loops=1) + -> Index Scan using t_simple_pkey on t_simple (actual rows=1 loops=1) + Index Cond: (k1 = 3) + Storage Table Read Requests: 1 + Storage Table Rows Scanned: 1 + Storage Table Write Requests: 1 + Storage Index Write Requests: 2 + Storage Read Requests: 1 + Storage Rows Scanned: 1 + Storage Write Requests: 3 + Storage Flush Requests: 1 +(11 
rows) + +-- Similaryly, updating the primary key should require a DELETE + INSERT +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET k2 = k2 + 10, v3 = v3 + 1 WHERE k1 = 4; + QUERY PLAN +-------------------------------------------------------------------------- + Update on t_simple (actual rows=0 loops=1) + -> Index Scan using t_simple_pkey on t_simple (actual rows=1 loops=1) + Index Cond: (k1 = 4) + Storage Table Read Requests: 1 + Storage Table Rows Scanned: 1 + Storage Table Write Requests: 2 + Storage Index Write Requests: 2 + Storage Read Requests: 1 + Storage Rows Scanned: 1 + Storage Write Requests: 4 + Storage Flush Requests: 1 +(11 rows) + +-- Updating non-key columns of the index should only require an UPDATE +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v3 = v3 + 1, v4 = v4 + 1 WHERE k1 = 5; + QUERY PLAN +-------------------------------------------------------------------------- + Update on t_simple (actual rows=0 loops=1) + -> Index Scan using t_simple_pkey on t_simple (actual rows=1 loops=1) + Index Cond: (k1 = 5) + Storage Table Read Requests: 1 + Storage Table Rows Scanned: 1 + Storage Table Write Requests: 1 + Storage Index Write Requests: 1 + Storage Read Requests: 1 + Storage Rows Scanned: 1 + Storage Write Requests: 2 + Storage Flush Requests: 1 +(11 rows) + +/*+ IndexOnlyScan(t_simple simple_v1_v2_v3) */ SELECT v1, v2, v3 FROM t_simple WHERE v2 <= 5 ORDER BY (v1, v2); + v1 | v2 | v3 +----+----+---- + 4 | 4 | 5 + 5 | 5 | 6 + 12 | 2 | 3 +(3 rows) + +/*+ IndexScan(simple_v1_v2_v3) */ SELECT * FROM t_simple WHERE k1 <= 5 ORDER BY (v1, v2); + k1 | k2 | v1 | v2 | v3 | v4 +----+----+----+----+----+---- + 3 | 3 | 3 | 13 | 4 | 3 + 4 | 14 | 4 | 4 | 5 | 4 + 5 | 5 | 5 | 5 | 6 | 6 + 1 | 1 | 11 | 11 | 1 | 1 + 2 | 2 | 12 | 2 | 3 | 2 +(5 rows) + +DROP INDEX simple_v1_v2_v3; +-- Create an index that has columns out of order and repeat the test above. 
+CREATE INDEX NONCONCURRENTLY out_of_order_v1_v2_v3 ON t_simple ((v3, v1) HASH) INCLUDE (v2); +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v1 = v1 + 10, v2 = v2 + 10 WHERE k1 = 1; + QUERY PLAN +-------------------------------------------------------------------------- + Update on t_simple (actual rows=0 loops=1) + -> Index Scan using t_simple_pkey on t_simple (actual rows=1 loops=1) + Index Cond: (k1 = 1) + Storage Table Read Requests: 1 + Storage Table Rows Scanned: 1 + Storage Table Write Requests: 1 + Storage Index Write Requests: 2 + Storage Read Requests: 1 + Storage Rows Scanned: 1 + Storage Write Requests: 3 + Storage Flush Requests: 1 +(11 rows) + +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v1 = v1 + 10, v3 = v3 + 1 WHERE k1 = 2; + QUERY PLAN +-------------------------------------------------------------------------- + Update on t_simple (actual rows=0 loops=1) + -> Index Scan using t_simple_pkey on t_simple (actual rows=1 loops=1) + Index Cond: (k1 = 2) + Storage Table Read Requests: 1 + Storage Table Rows Scanned: 1 + Storage Table Write Requests: 1 + Storage Index Write Requests: 2 + Storage Read Requests: 1 + Storage Rows Scanned: 1 + Storage Write Requests: 3 + Storage Flush Requests: 1 +(11 rows) + +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v2 = v1 + 10, v3 = v3 + 1 WHERE k1 = 3; + QUERY PLAN +-------------------------------------------------------------------------- + Update on t_simple (actual rows=0 loops=1) + -> Index Scan using t_simple_pkey on t_simple (actual rows=1 loops=1) + Index Cond: (k1 = 3) + Storage Table Read Requests: 1 + Storage Table Rows Scanned: 1 + Storage Table Write Requests: 1 + Storage Index Write Requests: 2 + Storage Read Requests: 1 + Storage Rows Scanned: 1 + Storage Write Requests: 3 + Storage Flush Requests: 1 +(11 rows) + +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET k2 = k2 + 10, v2 = v2 + 1 WHERE k1 = 1; + QUERY PLAN 
+-------------------------------------------------------------------------- + Update on t_simple (actual rows=0 loops=1) + -> Index Scan using t_simple_pkey on t_simple (actual rows=1 loops=1) + Index Cond: (k1 = 1) + Storage Table Read Requests: 1 + Storage Table Rows Scanned: 1 + Storage Table Write Requests: 2 + Storage Index Write Requests: 2 + Storage Read Requests: 1 + Storage Rows Scanned: 1 + Storage Write Requests: 4 + Storage Flush Requests: 1 +(11 rows) + +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v2 = v2 + 1, v4 = v4 + 1 WHERE k1 = 1; + QUERY PLAN +-------------------------------------------------------------------------- + Update on t_simple (actual rows=0 loops=1) + -> Index Scan using t_simple_pkey on t_simple (actual rows=1 loops=1) + Index Cond: (k1 = 1) + Storage Table Read Requests: 1 + Storage Table Rows Scanned: 1 + Storage Table Write Requests: 1 + Storage Index Write Requests: 1 + Storage Read Requests: 1 + Storage Rows Scanned: 1 + Storage Write Requests: 2 + Storage Flush Requests: 1 +(11 rows) + +/*+ IndexOnlyScan(t_simple out_of_order_v1_v2_v3) */ SELECT v1, v2, v3 FROM t_simple WHERE v2 <= 5 ORDER BY (v1, v2); + v1 | v2 | v3 +----+----+---- + 4 | 4 | 5 + 5 | 5 | 6 + 22 | 2 | 4 +(3 rows) + +/*+ IndexScan(out_of_order_v1_v2_v3) */ SELECT * FROM t_simple WHERE k1 <= 5 ORDER BY (v1, v2); + k1 | k2 | v1 | v2 | v3 | v4 +----+----+----+----+----+---- + 3 | 3 | 3 | 13 | 5 | 3 + 4 | 14 | 4 | 4 | 5 | 4 + 5 | 5 | 5 | 5 | 6 | 6 + 1 | 11 | 21 | 23 | 1 | 2 + 2 | 2 | 22 | 2 | 4 | 2 +(5 rows) + +DROP INDEX out_of_order_v1_v2_v3; +-- Range indexes +-- +-- Tests to validate index update behavior when the index contains NULL values +-- +CREATE INDEX NONCONCURRENTLY simple_v1_v2_v3 ON t_simple (v1, v2) INCLUDE (v3); +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v2 = NULL WHERE k1 = 1; + QUERY PLAN +-------------------------------------------------------------------------- + Update on t_simple (actual rows=0 loops=1) + -> Index Scan 
using t_simple_pkey on t_simple (actual rows=1 loops=1) + Index Cond: (k1 = 1) + Storage Table Read Requests: 1 + Storage Table Rows Scanned: 1 + Storage Table Write Requests: 1 + Storage Index Write Requests: 2 + Storage Read Requests: 1 + Storage Rows Scanned: 1 + Storage Write Requests: 3 + Storage Flush Requests: 1 +(11 rows) + +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v2 = NULL, v3 = NULL WHERE k1 = 2; + QUERY PLAN +-------------------------------------------------------------------------- + Update on t_simple (actual rows=0 loops=1) + -> Index Scan using t_simple_pkey on t_simple (actual rows=1 loops=1) + Index Cond: (k1 = 2) + Storage Table Read Requests: 1 + Storage Table Rows Scanned: 1 + Storage Table Write Requests: 1 + Storage Index Write Requests: 2 + Storage Read Requests: 1 + Storage Rows Scanned: 1 + Storage Write Requests: 3 + Storage Flush Requests: 1 +(11 rows) + +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v3 = NULL, v4 = NULL WHERE k1 = 3; + QUERY PLAN +-------------------------------------------------------------------------- + Update on t_simple (actual rows=0 loops=1) + -> Index Scan using t_simple_pkey on t_simple (actual rows=1 loops=1) + Index Cond: (k1 = 3) + Storage Table Read Requests: 1 + Storage Table Rows Scanned: 1 + Storage Table Write Requests: 1 + Storage Index Write Requests: 1 + Storage Read Requests: 1 + Storage Rows Scanned: 1 + Storage Write Requests: 2 + Storage Flush Requests: 1 +(11 rows) + +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v1 = NULL WHERE k1 = 4; + QUERY PLAN +-------------------------------------------------------------------------- + Update on t_simple (actual rows=0 loops=1) + -> Index Scan using t_simple_pkey on t_simple (actual rows=1 loops=1) + Index Cond: (k1 = 4) + Storage Table Read Requests: 1 + Storage Table Rows Scanned: 1 + Storage Table Write Requests: 1 + Storage Index Write Requests: 2 + Storage Read Requests: 1 + Storage Rows Scanned: 1 + Storage Write 
Requests: 3 + Storage Flush Requests: 1 +(11 rows) + +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v1 = NULL, v2 = NULL, v3 = NULL WHERE k1 = 5; + QUERY PLAN +-------------------------------------------------------------------------- + Update on t_simple (actual rows=0 loops=1) + -> Index Scan using t_simple_pkey on t_simple (actual rows=1 loops=1) + Index Cond: (k1 = 5) + Storage Table Read Requests: 1 + Storage Table Rows Scanned: 1 + Storage Table Write Requests: 1 + Storage Index Write Requests: 2 + Storage Read Requests: 1 + Storage Rows Scanned: 1 + Storage Write Requests: 3 + Storage Flush Requests: 1 +(11 rows) + +/*+ IndexOnlyScan(t_simple simple_v1_v2_v3) */ SELECT v1, v2, v3 FROM t_simple WHERE v1 <= 25 OR v1 IS NULL ORDER BY (v1, v2); + v1 | v2 | v3 +----+----+---- + 3 | 13 | + 6 | 6 | 6 + 7 | 7 | 7 + 8 | 8 | 8 + 9 | 9 | 9 + 10 | 10 | 10 + 21 | | 1 + 22 | | + | 4 | 5 + | | +(10 rows) + +/*+ IndexScan(simple_v1_v2_v3) */ SELECT * FROM t_simple WHERE k1 <= 5 ORDER BY (v1, v2); + k1 | k2 | v1 | v2 | v3 | v4 +----+----+----+----+----+---- + 3 | 3 | 3 | 13 | | + 1 | 11 | 21 | | 1 | 2 + 2 | 2 | 22 | | | 2 + 4 | 14 | | 4 | 5 | 4 + 5 | 5 | | | | 6 +(5 rows) + +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v2 = 1 WHERE k1 = 1; + QUERY PLAN +-------------------------------------------------------------------------- + Update on t_simple (actual rows=0 loops=1) + -> Index Scan using t_simple_pkey on t_simple (actual rows=1 loops=1) + Index Cond: (k1 = 1) + Storage Table Read Requests: 1 + Storage Table Rows Scanned: 1 + Storage Table Write Requests: 1 + Storage Index Write Requests: 2 + Storage Read Requests: 1 + Storage Rows Scanned: 1 + Storage Write Requests: 3 + Storage Flush Requests: 1 +(11 rows) + +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v2 = 2, v3 = k1 WHERE k1 = 2; + QUERY PLAN +-------------------------------------------------------------------------- + Update on t_simple (actual rows=0 loops=1) + -> Index Scan using 
t_simple_pkey on t_simple (actual rows=1 loops=1) + Index Cond: (k1 = 2) + Storage Table Read Requests: 1 + Storage Table Rows Scanned: 1 + Storage Table Write Requests: 1 + Storage Index Write Requests: 2 + Storage Read Requests: 1 + Storage Rows Scanned: 1 + Storage Write Requests: 3 + Storage Flush Requests: 1 +(11 rows) + +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v4 = k1 - k2 + v1, v3 = 3 WHERE k1 = 3; + QUERY PLAN +-------------------------------------------------------------------------- + Update on t_simple (actual rows=0 loops=1) + -> Index Scan using t_simple_pkey on t_simple (actual rows=1 loops=1) + Index Cond: (k1 = 3) + Storage Table Read Requests: 1 + Storage Table Rows Scanned: 1 + Storage Table Write Requests: 1 + Storage Index Write Requests: 1 + Storage Read Requests: 1 + Storage Rows Scanned: 1 + Storage Write Requests: 2 + Storage Flush Requests: 1 +(11 rows) + +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v1 = 4 WHERE k1 = 4; + QUERY PLAN +-------------------------------------------------------------------------- + Update on t_simple (actual rows=0 loops=1) + -> Index Scan using t_simple_pkey on t_simple (actual rows=1 loops=1) + Index Cond: (k1 = 4) + Storage Table Read Requests: 1 + Storage Table Rows Scanned: 1 + Storage Table Write Requests: 1 + Storage Index Write Requests: 2 + Storage Read Requests: 1 + Storage Rows Scanned: 1 + Storage Write Requests: 3 + Storage Flush Requests: 1 +(11 rows) + +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v1 = 5, v2 = 5, v3 = 5 WHERE k1 = 5; + QUERY PLAN +-------------------------------------------------------------------------- + Update on t_simple (actual rows=0 loops=1) + -> Index Scan using t_simple_pkey on t_simple (actual rows=1 loops=1) + Index Cond: (k1 = 5) + Storage Table Read Requests: 1 + Storage Table Rows Scanned: 1 + Storage Table Write Requests: 1 + Storage Index Write Requests: 2 + Storage Read Requests: 1 + Storage Rows Scanned: 1 + Storage Write 
Requests: 3 + Storage Flush Requests: 1 +(11 rows) + +/*+ IndexOnlyScan(t_simple simple_v1_v2_v3) */ SELECT v1, v2, v3 FROM t_simple WHERE v1 <= 25 OR v1 IS NULL ORDER BY (v1, v2); + v1 | v2 | v3 +----+----+---- + 3 | 13 | 3 + 4 | 4 | 5 + 5 | 5 | 5 + 6 | 6 | 6 + 7 | 7 | 7 + 8 | 8 | 8 + 9 | 9 | 9 + 10 | 10 | 10 + 21 | 1 | 1 + 22 | 2 | 2 +(10 rows) + +/*+ IndexScan(simple_v1_v2_v3) */ SELECT * FROM t_simple WHERE k1 <= 5 ORDER BY (v1, v2); + k1 | k2 | v1 | v2 | v3 | v4 +----+----+----+----+----+---- + 3 | 3 | 3 | 13 | 3 | 3 + 4 | 14 | 4 | 4 | 5 | 4 + 5 | 5 | 5 | 5 | 5 | 6 + 1 | 11 | 21 | 1 | 1 | 2 + 2 | 2 | 22 | 2 | 2 | 2 +(5 rows) + +DROP INDEX simple_v1_v2_v3; +-- Create a unique index with nullable values and repeat the tests above. +CREATE INDEX NONCONCURRENTLY simple_unique_v1_v2_v3 ON t_simple (v1, v2) INCLUDE (v3); +-- Setting any of the primary key columns to NULL should be done via a single UPDATE +-- Setting any of the secondary index key columns to NULL will still require the delete + update +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v2 = NULL WHERE k1 = 1; + QUERY PLAN +-------------------------------------------------------------------------- + Update on t_simple (actual rows=0 loops=1) + -> Index Scan using t_simple_pkey on t_simple (actual rows=1 loops=1) + Index Cond: (k1 = 1) + Storage Table Read Requests: 1 + Storage Table Rows Scanned: 1 + Storage Table Write Requests: 1 + Storage Index Write Requests: 2 + Storage Read Requests: 1 + Storage Rows Scanned: 1 + Storage Write Requests: 3 + Storage Flush Requests: 1 +(11 rows) + +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v2 = NULL, v3 = NULL WHERE k1 = 2; + QUERY PLAN +-------------------------------------------------------------------------- + Update on t_simple (actual rows=0 loops=1) + -> Index Scan using t_simple_pkey on t_simple (actual rows=1 loops=1) + Index Cond: (k1 = 2) + Storage Table Read Requests: 1 + Storage Table Rows Scanned: 1 + Storage Table Write Requests: 1 
+ Storage Index Write Requests: 2 + Storage Read Requests: 1 + Storage Rows Scanned: 1 + Storage Write Requests: 3 + Storage Flush Requests: 1 +(11 rows) + +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v3 = NULL, v4 = NULL WHERE k1 = 3; + QUERY PLAN +-------------------------------------------------------------------------- + Update on t_simple (actual rows=0 loops=1) + -> Index Scan using t_simple_pkey on t_simple (actual rows=1 loops=1) + Index Cond: (k1 = 3) + Storage Table Read Requests: 1 + Storage Table Rows Scanned: 1 + Storage Table Write Requests: 1 + Storage Index Write Requests: 1 + Storage Read Requests: 1 + Storage Rows Scanned: 1 + Storage Write Requests: 2 + Storage Flush Requests: 1 +(11 rows) + +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v1 = NULL WHERE k1 = 4; + QUERY PLAN +-------------------------------------------------------------------------- + Update on t_simple (actual rows=0 loops=1) + -> Index Scan using t_simple_pkey on t_simple (actual rows=1 loops=1) + Index Cond: (k1 = 4) + Storage Table Read Requests: 1 + Storage Table Rows Scanned: 1 + Storage Table Write Requests: 1 + Storage Index Write Requests: 2 + Storage Read Requests: 1 + Storage Rows Scanned: 1 + Storage Write Requests: 3 + Storage Flush Requests: 1 +(11 rows) + +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v1 = NULL, v2 = NULL, v3 = NULL WHERE k1 = 5; + QUERY PLAN +-------------------------------------------------------------------------- + Update on t_simple (actual rows=0 loops=1) + -> Index Scan using t_simple_pkey on t_simple (actual rows=1 loops=1) + Index Cond: (k1 = 5) + Storage Table Read Requests: 1 + Storage Table Rows Scanned: 1 + Storage Table Write Requests: 1 + Storage Index Write Requests: 2 + Storage Read Requests: 1 + Storage Rows Scanned: 1 + Storage Write Requests: 3 + Storage Flush Requests: 1 +(11 rows) + +/*+ IndexOnlyScan(t_simple simple_unique_v1_v2_v3) */ SELECT v1, v2, v3 FROM t_simple WHERE v1 <= 25 OR v1 IS NULL 
ORDER BY (v1, v2); + v1 | v2 | v3 +----+----+---- + 3 | 13 | + 6 | 6 | 6 + 7 | 7 | 7 + 8 | 8 | 8 + 9 | 9 | 9 + 10 | 10 | 10 + 21 | | 1 + 22 | | + | 4 | 5 + | | +(10 rows) + +/*+ IndexScan(simple_unique_v1_v2_v3) */ SELECT * FROM t_simple WHERE k1 <= 5 ORDER BY (v1, v2); + k1 | k2 | v1 | v2 | v3 | v4 +----+----+----+----+----+---- + 3 | 3 | 3 | 13 | | + 1 | 11 | 21 | | 1 | 2 + 2 | 2 | 22 | | | 2 + 4 | 14 | | 4 | 5 | 4 + 5 | 5 | | | | 6 +(5 rows) + +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v2 = 1 WHERE k1 = 1; + QUERY PLAN +-------------------------------------------------------------------------- + Update on t_simple (actual rows=0 loops=1) + -> Index Scan using t_simple_pkey on t_simple (actual rows=1 loops=1) + Index Cond: (k1 = 1) + Storage Table Read Requests: 1 + Storage Table Rows Scanned: 1 + Storage Table Write Requests: 1 + Storage Index Write Requests: 2 + Storage Read Requests: 1 + Storage Rows Scanned: 1 + Storage Write Requests: 3 + Storage Flush Requests: 1 +(11 rows) + +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v2 = 2, v3 = k1 WHERE k1 = 2; + QUERY PLAN +-------------------------------------------------------------------------- + Update on t_simple (actual rows=0 loops=1) + -> Index Scan using t_simple_pkey on t_simple (actual rows=1 loops=1) + Index Cond: (k1 = 2) + Storage Table Read Requests: 1 + Storage Table Rows Scanned: 1 + Storage Table Write Requests: 1 + Storage Index Write Requests: 2 + Storage Read Requests: 1 + Storage Rows Scanned: 1 + Storage Write Requests: 3 + Storage Flush Requests: 1 +(11 rows) + +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v4 = k1 - k2 + v1, v3 = 3 WHERE k1 = 3; + QUERY PLAN +-------------------------------------------------------------------------- + Update on t_simple (actual rows=0 loops=1) + -> Index Scan using t_simple_pkey on t_simple (actual rows=1 loops=1) + Index Cond: (k1 = 3) + Storage Table Read Requests: 1 + Storage Table Rows Scanned: 1 + Storage Table Write 
Requests: 1 + Storage Index Write Requests: 1 + Storage Read Requests: 1 + Storage Rows Scanned: 1 + Storage Write Requests: 2 + Storage Flush Requests: 1 +(11 rows) + +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v1 = 4 WHERE k1 = 4; + QUERY PLAN +-------------------------------------------------------------------------- + Update on t_simple (actual rows=0 loops=1) + -> Index Scan using t_simple_pkey on t_simple (actual rows=1 loops=1) + Index Cond: (k1 = 4) + Storage Table Read Requests: 1 + Storage Table Rows Scanned: 1 + Storage Table Write Requests: 1 + Storage Index Write Requests: 2 + Storage Read Requests: 1 + Storage Rows Scanned: 1 + Storage Write Requests: 3 + Storage Flush Requests: 1 +(11 rows) + +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v1 = 5, v2 = 5, v3 = 5 WHERE k1 = 5; + QUERY PLAN +-------------------------------------------------------------------------- + Update on t_simple (actual rows=0 loops=1) + -> Index Scan using t_simple_pkey on t_simple (actual rows=1 loops=1) + Index Cond: (k1 = 5) + Storage Table Read Requests: 1 + Storage Table Rows Scanned: 1 + Storage Table Write Requests: 1 + Storage Index Write Requests: 2 + Storage Read Requests: 1 + Storage Rows Scanned: 1 + Storage Write Requests: 3 + Storage Flush Requests: 1 +(11 rows) + +/*+ IndexOnlyScan(t_simple simple_unique_v1_v2_v3) */ SELECT v1, v2, v3 FROM t_simple WHERE v1 <= 25 OR v1 IS NULL ORDER BY (v1, v2); + v1 | v2 | v3 +----+----+---- + 3 | 13 | 3 + 4 | 4 | 5 + 5 | 5 | 5 + 6 | 6 | 6 + 7 | 7 | 7 + 8 | 8 | 8 + 9 | 9 | 9 + 10 | 10 | 10 + 21 | 1 | 1 + 22 | 2 | 2 +(10 rows) + +/*+ IndexScan(simple_unique_v1_v2_v3) */ SELECT * FROM t_simple WHERE k1 <= 5 ORDER BY (v1, v2); + k1 | k2 | v1 | v2 | v3 | v4 +----+----+----+----+----+---- + 3 | 3 | 3 | 13 | 3 | 3 + 4 | 14 | 4 | 4 | 5 | 4 + 5 | 5 | 5 | 5 | 5 | 6 + 1 | 11 | 21 | 1 | 1 | 2 + 2 | 2 | 22 | 2 | 2 | 2 +(5 rows) + +DROP INDEX simple_unique_v1_v2_v3; +-- +-- Tests to validate index update behavior for 
partial indexes +-- +TRUNCATE t_simple; +INSERT INTO t_simple (SELECT i, i, i, i, i, i FROM generate_series(1, 10) AS i); +CREATE INDEX NONCONCURRENTLY simple_partial_or ON t_simple (v1, v2) INCLUDE (v3) WHERE v1 < 5 OR v2 < 10; +CREATE INDEX NONCONCURRENTLY simple_partial_and ON t_simple (v1, v2) INCLUDE (v3) WHERE v1 < 5 AND v2 < 10; +-- The row must be deleted from both indexes +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v1 = 11, v2 = 10 WHERE k1 = 1; + QUERY PLAN +-------------------------------------------------------------------------- + Update on t_simple (actual rows=0 loops=1) + -> Index Scan using t_simple_pkey on t_simple (actual rows=1 loops=1) + Index Cond: (k1 = 1) + Storage Table Read Requests: 1 + Storage Table Rows Scanned: 1 + Storage Table Write Requests: 1 + Storage Index Write Requests: 2 + Storage Read Requests: 1 + Storage Rows Scanned: 1 + Storage Write Requests: 3 + Storage Flush Requests: 1 +(11 rows) + +-- The rows must be deleted from the AND index but not the OR index +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v1 = 12, v2 = 8 WHERE k1 = 2; + QUERY PLAN +-------------------------------------------------------------------------- + Update on t_simple (actual rows=0 loops=1) + -> Index Scan using t_simple_pkey on t_simple (actual rows=1 loops=1) + Index Cond: (k1 = 2) + Storage Table Read Requests: 1 + Storage Table Rows Scanned: 1 + Storage Table Write Requests: 1 + Storage Index Write Requests: 3 + Storage Read Requests: 1 + Storage Rows Scanned: 1 + Storage Write Requests: 4 + Storage Flush Requests: 1 +(11 rows) + +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v2 = 15 WHERE k1 = 3; + QUERY PLAN +-------------------------------------------------------------------------- + Update on t_simple (actual rows=0 loops=1) + -> Index Scan using t_simple_pkey on t_simple (actual rows=1 loops=1) + Index Cond: (k1 = 3) + Storage Table Read Requests: 1 + Storage Table Rows Scanned: 1 + Storage Table Write Requests: 1 
+ Storage Index Write Requests: 3 + Storage Read Requests: 1 + Storage Rows Scanned: 1 + Storage Write Requests: 4 + Storage Flush Requests: 1 +(11 rows) + +-- Modifying the INCLUDE columns should have no impact on the index +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v3 = v3 + 10 WHERE k1 = 6; + QUERY PLAN +-------------------------------------------------------------------------- + Update on t_simple (actual rows=0 loops=1) + -> Index Scan using t_simple_pkey on t_simple (actual rows=1 loops=1) + Index Cond: (k1 = 6) + Storage Table Read Requests: 1 + Storage Table Rows Scanned: 1 + Storage Table Write Requests: 1 + Storage Index Write Requests: 1 + Storage Read Requests: 1 + Storage Rows Scanned: 1 + Storage Write Requests: 2 + Storage Flush Requests: 1 +(11 rows) + +-- The row must be inserted into both indexes +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v1 = 5, v2 = -5 WHERE k1 = 1; + QUERY PLAN +-------------------------------------------------------------------------- + Update on t_simple (actual rows=0 loops=1) + -> Index Scan using t_simple_pkey on t_simple (actual rows=1 loops=1) + Index Cond: (k1 = 1) + Storage Table Read Requests: 1 + Storage Table Rows Scanned: 1 + Storage Table Write Requests: 1 + Storage Index Write Requests: 1 + Storage Read Requests: 1 + Storage Rows Scanned: 1 + Storage Write Requests: 2 + Storage Flush Requests: 1 +(11 rows) + +-- The row must be inserted into one of the indexes but not the other +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v1 = 4 WHERE k1 = 2; + QUERY PLAN +-------------------------------------------------------------------------- + Update on t_simple (actual rows=0 loops=1) + -> Index Scan using t_simple_pkey on t_simple (actual rows=1 loops=1) + Index Cond: (k1 = 2) + Storage Table Read Requests: 1 + Storage Table Rows Scanned: 1 + Storage Table Write Requests: 1 + Storage Index Write Requests: 3 + Storage Read Requests: 1 + Storage Rows Scanned: 1 + Storage Write Requests: 4 
+ Storage Flush Requests: 1 +(11 rows) + +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v1 = 4, v2 = 8, v3 = 100 WHERE k1 = 7; + QUERY PLAN +-------------------------------------------------------------------------- + Update on t_simple (actual rows=0 loops=1) + -> Index Scan using t_simple_pkey on t_simple (actual rows=1 loops=1) + Index Cond: (k1 = 7) + Storage Table Read Requests: 1 + Storage Table Rows Scanned: 1 + Storage Table Write Requests: 1 + Storage Index Write Requests: 3 + Storage Read Requests: 1 + Storage Rows Scanned: 1 + Storage Write Requests: 4 + Storage Flush Requests: 1 +(11 rows) + +-- Modifying the primary key columns should neither delete nor insert into the index +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET k2 = k1 + 10, v3 = v3 + 1 WHERE k1 = 8; + QUERY PLAN +-------------------------------------------------------------------------- + Update on t_simple (actual rows=0 loops=1) + -> Index Scan using t_simple_pkey on t_simple (actual rows=1 loops=1) + Index Cond: (k1 = 8) + Storage Table Read Requests: 1 + Storage Table Rows Scanned: 1 + Storage Table Write Requests: 2 + Storage Index Write Requests: 2 + Storage Read Requests: 1 + Storage Rows Scanned: 1 + Storage Write Requests: 4 + Storage Flush Requests: 1 +(11 rows) + +/*+ IndexOnlyScan(t_simple simple_partial_or) */ SELECT v1, v2, v3 FROM t_simple WHERE v1 < 5 OR v2 < 10 ORDER BY (v1, v2); + v1 | v2 | v3 +----+----+----- + 3 | 15 | 3 + 4 | 4 | 4 + 4 | 8 | 100 + 4 | 8 | 2 + 5 | -5 | 1 + 5 | 5 | 5 + 6 | 6 | 16 + 8 | 8 | 9 + 9 | 9 | 9 +(9 rows) + +/*+ IndexScan(simple_unique_v1_v2_v3) */ SELECT * FROM t_simple WHERE v1 < 5 OR v2 < 10 ORDER BY (v1, v2); + k1 | k2 | v1 | v2 | v3 | v4 +----+----+----+----+-----+---- + 3 | 3 | 3 | 15 | 3 | 3 + 4 | 4 | 4 | 4 | 4 | 4 + 2 | 2 | 4 | 8 | 2 | 2 + 7 | 7 | 4 | 8 | 100 | 7 + 1 | 1 | 5 | -5 | 1 | 1 + 5 | 5 | 5 | 5 | 5 | 5 + 6 | 6 | 6 | 6 | 16 | 6 + 8 | 18 | 8 | 8 | 9 | 8 + 9 | 9 | 9 | 9 | 9 | 9 +(9 rows) + +/*+ IndexOnlyScan(t_simple 
simple_partial_and) */ SELECT v1, v2, v3 FROM t_simple WHERE v1 < 5 AND v2 < 10 ORDER BY (v1, v2); + v1 | v2 | v3 +----+----+----- + 4 | 4 | 4 + 4 | 8 | 100 + 4 | 8 | 2 +(3 rows) + +/*+ IndexScan(simple_unique_v1_v2_v3) */ SELECT * FROM t_simple WHERE v1 < 5 AND v2 < 10 ORDER BY (v1, v2); + k1 | k2 | v1 | v2 | v3 | v4 +----+----+----+----+-----+---- + 4 | 4 | 4 | 4 | 4 | 4 + 7 | 7 | 4 | 8 | 100 | 7 + 2 | 2 | 4 | 8 | 2 | 2 +(3 rows) + +SELECT * FROM t_simple; + k1 | k2 | v1 | v2 | v3 | v4 +----+----+----+----+-----+---- + 5 | 5 | 5 | 5 | 5 | 5 + 1 | 1 | 5 | -5 | 1 | 1 + 6 | 6 | 6 | 6 | 16 | 6 + 7 | 7 | 4 | 8 | 100 | 7 + 9 | 9 | 9 | 9 | 9 | 9 + 10 | 10 | 10 | 10 | 10 | 10 + 4 | 4 | 4 | 4 | 4 | 4 + 2 | 2 | 4 | 8 | 2 | 2 + 8 | 18 | 8 | 8 | 9 | 8 + 3 | 3 | 3 | 15 | 3 | 3 +(10 rows) + +DROP INDEX simple_partial_or; +DROP INDEX simple_partial_and; +-- +-- Tests to validate index update behavior for expression indexes +-- +TRUNCATE t_simple; +INSERT INTO t_simple (SELECT i, i, i, i, i, i FROM generate_series(1, 10) AS i); +CREATE UNIQUE INDEX NONCONCURRENTLY simple_expr ON t_simple ((v1 + 10)) INCLUDE (v3); +-- Updating any of the columns making up the expression should require a DELETE + INSERT +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v1 = 11 WHERE k1 = 1; + QUERY PLAN +-------------------------------------------------------------------------- + Update on t_simple (actual rows=0 loops=1) + -> Index Scan using t_simple_pkey on t_simple (actual rows=1 loops=1) + Index Cond: (k1 = 1) + Storage Table Read Requests: 1 + Storage Table Rows Scanned: 1 + Storage Table Write Requests: 1 + Storage Index Write Requests: 2 + Storage Read Requests: 1 + Storage Rows Scanned: 1 + Storage Write Requests: 3 + Storage Flush Requests: 1 +(11 rows) + +-- Any other column update should only require an UPDATE +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET k2 = k2 + 1 WHERE k1 = 2; + QUERY PLAN +-------------------------------------------------------------------------- + 
Update on t_simple (actual rows=0 loops=1) + -> Index Scan using t_simple_pkey on t_simple (actual rows=1 loops=1) + Index Cond: (k1 = 2) + Storage Table Read Requests: 1 + Storage Table Rows Scanned: 1 + Storage Table Write Requests: 2 + Storage Index Write Requests: 1 + Storage Read Requests: 1 + Storage Rows Scanned: 1 + Storage Write Requests: 3 + Storage Flush Requests: 1 +(11 rows) + +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET k1 = k1 + 10 WHERE k1 = 3; + QUERY PLAN +-------------------------------------------------------------------------- + Update on t_simple (actual rows=0 loops=1) + -> Index Scan using t_simple_pkey on t_simple (actual rows=1 loops=1) + Index Cond: (k1 = 3) + Storage Table Read Requests: 1 + Storage Table Rows Scanned: 1 + Storage Table Write Requests: 2 + Storage Index Write Requests: 1 + Storage Read Requests: 1 + Storage Rows Scanned: 1 + Storage Write Requests: 3 + Storage Flush Requests: 1 +(11 rows) + +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET k2 = k2 + 5, v3 = v3 + 5 WHERE k1 = 4; + QUERY PLAN +-------------------------------------------------------------------------- + Update on t_simple (actual rows=0 loops=1) + -> Index Scan using t_simple_pkey on t_simple (actual rows=1 loops=1) + Index Cond: (k1 = 4) + Storage Table Read Requests: 1 + Storage Table Rows Scanned: 1 + Storage Table Write Requests: 2 + Storage Index Write Requests: 1 + Storage Read Requests: 1 + Storage Rows Scanned: 1 + Storage Write Requests: 3 + Storage Flush Requests: 1 +(11 rows) + +/*+ IndexScan(simple_expr) */ SELECT *, v1 + 10 FROM t_simple WHERE v1 + 10 IN (21, 12, 13, 14, 15, 16, 17, 18, 19, 20) ORDER BY (v1, v2); + k1 | k2 | v1 | v2 | v3 | v4 | ?column? 
+----+----+----+----+----+----+---------- + 2 | 3 | 2 | 2 | 2 | 2 | 12 + 13 | 3 | 3 | 3 | 3 | 3 | 13 + 4 | 9 | 4 | 4 | 9 | 4 | 14 + 5 | 5 | 5 | 5 | 5 | 5 | 15 + 6 | 6 | 6 | 6 | 6 | 6 | 16 + 7 | 7 | 7 | 7 | 7 | 7 | 17 + 8 | 8 | 8 | 8 | 8 | 8 | 18 + 9 | 9 | 9 | 9 | 9 | 9 | 19 + 10 | 10 | 10 | 10 | 10 | 10 | 20 + 1 | 1 | 11 | 1 | 1 | 1 | 21 +(10 rows) + +SELECT * FROM t_simple; + k1 | k2 | v1 | v2 | v3 | v4 +----+----+----+----+----+---- + 5 | 5 | 5 | 5 | 5 | 5 + 1 | 1 | 11 | 1 | 1 | 1 + 13 | 3 | 3 | 3 | 3 | 3 + 6 | 6 | 6 | 6 | 6 | 6 + 7 | 7 | 7 | 7 | 7 | 7 + 9 | 9 | 9 | 9 | 9 | 9 + 10 | 10 | 10 | 10 | 10 | 10 + 4 | 9 | 4 | 4 | 9 | 4 + 2 | 3 | 2 | 2 | 2 | 2 + 8 | 8 | 8 | 8 | 8 | 8 +(10 rows) + +-- CREATE INDEX NONCONCURRENTLY complex_expr_v2 ON t_simple ((v1 * v2), v3) INCLUDE (v4); +-- +-- Tests to validate multiple include columns +-- +TRUNCATE t_simple; +INSERT INTO t_simple (SELECT i, i, i, i, i, i FROM generate_series(1, 10) AS i); +CREATE UNIQUE INDEX NONCONCURRENTLY multi_include ON t_simple (v1, v2) INCLUDE (v4, v3, v2); +-- Updating any of the key columns should require a DELETE + INSERT +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v1 = 11 WHERE k1 = 1; + QUERY PLAN +-------------------------------------------------------------------------- + Update on t_simple (actual rows=0 loops=1) + -> Index Scan using t_simple_pkey on t_simple (actual rows=1 loops=1) + Index Cond: (k1 = 1) + Storage Table Read Requests: 1 + Storage Table Rows Scanned: 1 + Storage Table Write Requests: 1 + Storage Index Write Requests: 4 + Storage Read Requests: 1 + Storage Rows Scanned: 1 + Storage Write Requests: 5 + Storage Flush Requests: 1 +(11 rows) + +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v2 = v2 + 1 WHERE k1 = 2; + QUERY PLAN +-------------------------------------------------------------------------- + Update on t_simple (actual rows=0 loops=1) + -> Index Scan using t_simple_pkey on t_simple (actual rows=1 loops=1) + Index Cond: (k1 = 2) + Storage Table 
Read Requests: 1 + Storage Table Rows Scanned: 1 + Storage Table Write Requests: 1 + Storage Index Write Requests: 2 + Storage Read Requests: 1 + Storage Rows Scanned: 1 + Storage Write Requests: 3 + Storage Flush Requests: 1 +(11 rows) + +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v1 = v2 + 10, v2 = v1 + 1 WHERE k1 = 3; + QUERY PLAN +-------------------------------------------------------------------------- + Update on t_simple (actual rows=0 loops=1) + -> Index Scan using t_simple_pkey on t_simple (actual rows=1 loops=1) + Index Cond: (k1 = 3) + Storage Table Read Requests: 1 + Storage Table Rows Scanned: 1 + Storage Table Write Requests: 1 + Storage Index Write Requests: 4 + Storage Read Requests: 1 + Storage Rows Scanned: 1 + Storage Write Requests: 5 + Storage Flush Requests: 1 +(11 rows) + +-- Any other column update should only require an UPDATE +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v3 = v3 + 1 WHERE k1 = 4; + QUERY PLAN +-------------------------------------------------------------------------- + Update on t_simple (actual rows=0 loops=1) + -> Index Scan using t_simple_pkey on t_simple (actual rows=1 loops=1) + Index Cond: (k1 = 4) + Storage Table Read Requests: 1 + Storage Table Rows Scanned: 1 + Storage Table Write Requests: 1 + Storage Index Write Requests: 2 + Storage Read Requests: 1 + Storage Rows Scanned: 1 + Storage Write Requests: 3 + Storage Flush Requests: 1 +(11 rows) + +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v3 = v4, v4 = v3 WHERE k1 = 5; + QUERY PLAN +-------------------------------------------------------------------------- + Update on t_simple (actual rows=0 loops=1) + -> Index Scan using t_simple_pkey on t_simple (actual rows=1 loops=1) + Index Cond: (k1 = 5) + Storage Table Read Requests: 1 + Storage Table Rows Scanned: 1 + Storage Table Write Requests: 1 + Storage Index Write Requests: 2 + Storage Read Requests: 1 + Storage Rows Scanned: 1 + Storage Write Requests: 3 + Storage Flush Requests: 
1 +(11 rows) + +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v4 = v2 + 1 WHERE k1 = 6; + QUERY PLAN +-------------------------------------------------------------------------- + Update on t_simple (actual rows=0 loops=1) + -> Index Scan using t_simple_pkey on t_simple (actual rows=1 loops=1) + Index Cond: (k1 = 6) + Storage Table Read Requests: 1 + Storage Table Rows Scanned: 1 + Storage Table Write Requests: 1 + Storage Index Write Requests: 1 + Storage Read Requests: 1 + Storage Rows Scanned: 1 + Storage Write Requests: 2 + Storage Flush Requests: 1 +(11 rows) + +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET k1 = k1 + 10, k2 = k2 + 1 WHERE k1 = 7; + QUERY PLAN +-------------------------------------------------------------------------- + Update on t_simple (actual rows=0 loops=1) + -> Index Scan using t_simple_pkey on t_simple (actual rows=1 loops=1) + Index Cond: (k1 = 7) + Storage Table Read Requests: 1 + Storage Table Rows Scanned: 1 + Storage Table Write Requests: 2 + Storage Index Write Requests: 2 + Storage Read Requests: 1 + Storage Rows Scanned: 1 + Storage Write Requests: 4 + Storage Flush Requests: 1 +(11 rows) + +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET k1 = k1 + 10, k2 = k2 + 1 WHERE k1 = 8; + QUERY PLAN +-------------------------------------------------------------------------- + Update on t_simple (actual rows=0 loops=1) + -> Index Scan using t_simple_pkey on t_simple (actual rows=1 loops=1) + Index Cond: (k1 = 8) + Storage Table Read Requests: 1 + Storage Table Rows Scanned: 1 + Storage Table Write Requests: 2 + Storage Index Write Requests: 2 + Storage Read Requests: 1 + Storage Rows Scanned: 1 + Storage Write Requests: 4 + Storage Flush Requests: 1 +(11 rows) + +EXPLAIN (VERBOSE) /*+ IndexOnlyScan(t_simple multi_include) */ SELECT v1, v2, v3, v4 FROM t_simple ORDER BY (v1, v2); +WARNING: GUC yb_explain_hide_non_deterministic_fields disables EXPLAIN option VERBOSE + QUERY PLAN 
+----------------------------------------------------------------------------------------------- + Sort (cost=163.83..166.33 rows=1000 width=48) + Sort Key: (ROW(v1, v2)) + -> Index Only Scan using multi_include on t_simple (cost=0.00..114.00 rows=1000 width=48) +(3 rows) + +/*+ IndexOnlyScan(t_simple multi_include) */ SELECT v1, v2, v3, v4 FROM t_simple ORDER BY (v1, v2); + v1 | v2 | v3 | v4 +----+----+----+---- + 2 | 3 | 2 | 2 + 4 | 4 | 5 | 4 + 5 | 5 | 5 | 5 + 6 | 6 | 6 | 7 + 7 | 7 | 7 | 7 + 8 | 8 | 8 | 8 + 9 | 9 | 9 | 9 + 10 | 10 | 10 | 10 + 11 | 1 | 1 | 1 + 13 | 4 | 3 | 3 +(10 rows) + +EXPLAIN (VERBOSE) /*+ IndexScan(multi_include) */ SELECT * FROM t_simple ORDER BY (v1, v2); +WARNING: GUC yb_explain_hide_non_deterministic_fields disables EXPLAIN option VERBOSE + QUERY PLAN +-------------------------------------------------------------------- + Sort (cost=149.83..152.33 rows=1000 width=56) + Sort Key: (ROW(v1, v2)) + -> Seq Scan on t_simple (cost=0.00..100.00 rows=1000 width=56) +(3 rows) + +/*+ IndexScan(multi_include) */ SELECT * FROM t_simple ORDER BY (v1, v2); + k1 | k2 | v1 | v2 | v3 | v4 +----+----+----+----+----+---- + 2 | 2 | 2 | 3 | 2 | 2 + 4 | 4 | 4 | 4 | 5 | 4 + 5 | 5 | 5 | 5 | 5 | 5 + 6 | 6 | 6 | 6 | 6 | 7 + 17 | 8 | 7 | 7 | 7 | 7 + 18 | 9 | 8 | 8 | 8 | 8 + 9 | 9 | 9 | 9 | 9 | 9 + 10 | 10 | 10 | 10 | 10 | 10 + 1 | 1 | 11 | 1 | 1 | 1 + 3 | 3 | 13 | 4 | 3 | 3 +(10 rows) + +SELECT * FROM t_simple; + k1 | k2 | v1 | v2 | v3 | v4 +----+----+----+----+----+---- + 5 | 5 | 5 | 5 | 5 | 5 + 1 | 1 | 11 | 1 | 1 | 1 + 6 | 6 | 6 | 6 | 6 | 7 + 9 | 9 | 9 | 9 | 9 | 9 + 17 | 8 | 7 | 7 | 7 | 7 + 10 | 10 | 10 | 10 | 10 | 10 + 4 | 4 | 4 | 4 | 5 | 4 + 18 | 9 | 8 | 8 | 8 | 8 + 2 | 2 | 2 | 3 | 2 | 2 + 3 | 3 | 13 | 4 | 3 | 3 +(10 rows) diff --git a/src/postgres/src/test/regress/expected/yb_update_optimize_indices.out b/src/postgres/src/test/regress/expected/yb_update_optimize_indices.out index a7f38c54db9b..d1da4df9fae4 100644 --- 
a/src/postgres/src/test/regress/expected/yb_update_optimize_indices.out +++ b/src/postgres/src/test/regress/expected/yb_update_optimize_indices.out @@ -1311,13 +1311,12 @@ EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE text_table SET v4 = 'abcde'::char(8) | Storage Table Read Requests: 1 Storage Table Rows Scanned: 1 Storage Table Write Requests: 1 - Storage Index Write Requests: 2 - Storage Flush Requests: 1 + Storage Index Write Requests: 1 Storage Read Requests: 1 Storage Rows Scanned: 1 - Storage Write Requests: 3 - Storage Flush Requests: 2 -(12 rows) + Storage Write Requests: 2 + Storage Flush Requests: 1 +(11 rows) SELECT * FROM text_table; h | v1 | v2 | v3 | v4 @@ -1453,13 +1452,12 @@ EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE composite_table SET c1.v1 = (c1).v2 + Storage Table Read Requests: 1 Storage Table Rows Scanned: 1 Storage Table Write Requests: 1 - Storage Index Write Requests: 2 - Storage Flush Requests: 1 + Storage Index Write Requests: 1 Storage Read Requests: 1 Storage Rows Scanned: 1 - Storage Write Requests: 3 - Storage Flush Requests: 2 -(12 rows) + Storage Write Requests: 2 + Storage Flush Requests: 1 +(11 rows) EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE composite_table SET v = v + 1, c1.v3 = 'some-text' WHERE h = 1; QUERY PLAN @@ -1502,13 +1500,12 @@ EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE composite_table SET c1.v2 = 123.451, c Storage Table Read Requests: 1 Storage Table Rows Scanned: 1 Storage Table Write Requests: 1 - Storage Index Write Requests: 2 - Storage Flush Requests: 1 + Storage Index Write Requests: 1 Storage Read Requests: 1 Storage Rows Scanned: 1 - Storage Write Requests: 3 - Storage Flush Requests: 2 -(12 rows) + Storage Write Requests: 2 + Storage Flush Requests: 1 +(11 rows) EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE composite_table SET c1 = ROW((v) + 100, (c1).v1 + 123.45, 'someother-text', '{"a": 2, "b": 1}'::jsonb, '{"d": 4, "c": 3}'::bytea) WHERE h = 1; QUERY PLAN @@ -1519,13 +1516,12 @@ EXPLAIN (ANALYZE, DIST, COSTS OFF) 
UPDATE composite_table SET c1 = ROW((v) + 100 Storage Table Read Requests: 1 Storage Table Rows Scanned: 1 Storage Table Write Requests: 1 - Storage Index Write Requests: 2 - Storage Flush Requests: 1 + Storage Index Write Requests: 1 Storage Read Requests: 1 Storage Rows Scanned: 1 - Storage Write Requests: 3 - Storage Flush Requests: 2 -(12 rows) + Storage Write Requests: 2 + Storage Flush Requests: 1 +(11 rows) SELECT * FROM composite_table; h | v | c1 diff --git a/src/postgres/src/test/regress/sql/yb_update_optimize_index_updates.sql b/src/postgres/src/test/regress/sql/yb_update_optimize_index_updates.sql new file mode 100644 index 000000000000..10e6e94bb669 --- /dev/null +++ b/src/postgres/src/test/regress/sql/yb_update_optimize_index_updates.sql @@ -0,0 +1,263 @@ +SET yb_fetch_row_limit TO 1024; +SET yb_explain_hide_non_deterministic_fields TO true; + +-- +-- Tests to validate index updates in a table with no primary key. +-- +DROP TABLE IF EXISTS no_pkey_table; +CREATE TABLE no_pkey_table (v1 INT, v2 INT, v3 INT, v4 INT); +CREATE INDEX NONCONCURRENTLY no_pkey_v1 ON no_pkey_table (v1 HASH); +CREATE INDEX NONCONCURRENTLY no_pkey_v2_hash_v3 ON no_pkey_table (v2 HASH) INCLUDE (v3); +CREATE INDEX NONCONCURRENTLY no_pkey_v2_range_v3 ON no_pkey_table (v2 ASC) INCLUDE (v3); + +INSERT INTO no_pkey_table (SELECT i, i, i, i FROM generate_series(1, 10) AS i); + +-- Updating a column with no indexes should not require index writes +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE no_pkey_table SET v4 = v4 + 1 WHERE v1 = 1; +-- Updating the key columns of an index should require a DELETE + INSERT +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE no_pkey_table SET v2 = v2 + 1 WHERE v1 = 1; +-- Updating non-key columns of an index should only require an UPDATE +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE no_pkey_table SET v3 = v3 + 1 WHERE v1 = 1; +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE no_pkey_table SET v3 = v3 + 1, v4 = v4 + 1 WHERE v1 = 1; +-- Updating a mix of key and non-key 
columns of an index should require a DELETE + INSERT +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE no_pkey_table SET v2 = v2 + 1, v3 = v3 + 1 WHERE v1 = 1; + +-- Validate the updates above using both SeqScan and IndexOnlyScan +/*+ SeqScan(no_pkey_table) */ SELECT * FROM no_pkey_table WHERE v1 = 1 ORDER BY v1; +/*+ IndexOnlyScan(no_pkey_table no_pkey_v2_hash_v3) */ SELECT v2, v3 FROM no_pkey_table WHERE v2 = 3 ORDER BY (v2, v3); +/*+ IndexOnlyScan(no_pkey_table no_pkey_v2_range_v3) */ SELECT v2, v3 FROM no_pkey_table WHERE v2 = 3 ORDER BY (v2, v3); + +DROP TABLE IF EXISTS t_simple; +CREATE TABLE t_simple (k1 INT, k2 INT NULL, v1 INT, v2 INT, v3 INT, v4 INT, PRIMARY KEY (k1, k2)); +INSERT INTO t_simple (SELECT i, i, i, i, i, i FROM generate_series(1, 10) AS i); + +-- +-- Vanilla tests to validate index updates in a table with a primary key. +-- +CREATE INDEX NONCONCURRENTLY simple_v1 ON t_simple (v1 HASH); +CREATE INDEX NONCONCURRENTLY simple_v2_hash_v3 ON t_simple (v2 HASH) INCLUDE (v3); +CREATE INDEX NONCONCURRENTLY simple_v2_range_v3 ON t_simple (v2 ASC) INCLUDE (v3); + +-- Updating a column with no indexes should not require index writes +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v4 = v4 + 1 WHERE k1 = 1; +-- Updating the key columns of an index should require a DELETE + INSERT +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v2 = v2 + 1 WHERE k1 = 1; +-- Updating non-key columns of an index should only require an UPDATE +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v3 = v3 + 1 WHERE k1 = 1; +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v3 = v3 + 1, v4 = v4 + 1 WHERE k1 = 1; +-- Updating a mix of key and non-key columns of an index should require a DELETE + INSERT +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v2 = v2 + 1, v3 = v3 + 1 WHERE k1 = 1; + +-- Validate the updates above using both SeqScan and IndexOnlyScan +/*+ SeqScan(t_simple) */ SELECT * FROM t_simple WHERE k1 = 1 ORDER BY (k1, k2); +/*+ 
IndexOnlyScan(t_simple simple_v2_hash_v3) */ SELECT v2, v3 FROM t_simple WHERE v2 = 3 ORDER BY (v2, v3); +/*+ IndexOnlyScan(t_simple simple_v2_range_v3) */ SELECT v2, v3 FROM t_simple WHERE v2 = 3 ORDER BY (v2, v3); + +-- Updating the primary key columns should require a DELETE + INSERT on the main table +-- as well as on non-unique indexes. +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET k1 = k1 + 10, v3 = v3 + 1 WHERE k1 = 1; +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET k2 = k2 + 10, v3 = v3 + 1 WHERE k1 = 11; +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET k1 = 12, k2 = 22, v3 = v3 + 1 WHERE k1 < 3; +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET k1 = k1 + 10, k2 = k2 + 10, v2 = v2 + 1 WHERE k2 = 3; +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET k1 = k1 + 10, k2 = k2 + 10, v2 = v2 + 1, v3 = v3 + 1 WHERE k2 = 4; + +/*+ SeqScan(t_simple) */ SELECT * FROM t_simple WHERE k1 > 10 ORDER BY (k1, k2); +/*+ IndexOnlyScan(t_simple simple_v2_hash_v3) */ SELECT v2, v3 FROM t_simple WHERE v2 <= 5 ORDER BY (v2, v3); +/*+ IndexOnlyScan(t_simple simple_v2_range_v3) */ SELECT v2, v3 FROM t_simple WHERE v2 <= 5 ORDER BY (v2, v3); +/*+ IndexScan(simple_v2_hash_v3) */ SELECT * FROM t_simple WHERE v2 <= 5 ORDER BY (v2, v3); +/*+ IndexScan(simple_v2_range_v3) */ SELECT * FROM t_simple WHERE v2 <= 5 ORDER BY (v2, v3); + +DROP INDEX simple_v1; +DROP INDEX simple_v2_hash_v3; +DROP INDEX simple_v2_range_v3; + +-- +-- Vanilla tests to validate index updates in unique indexes. 
+-- +TRUNCATE t_simple; +INSERT INTO t_simple (SELECT i, i, i, i, i, i FROM generate_series(1, 10) AS i); +CREATE UNIQUE INDEX NONCONCURRENTLY simple_v2_hash_v3 ON t_simple (v2 HASH) INCLUDE (v3); +CREATE UNIQUE INDEX NONCONCURRENTLY simple_v2_range_v3 ON t_simple (v2 ASC) INCLUDE (v3); + +-- Updating the key columns of a unique index should require a DELETE + INSERT +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v2 = v2 + 10 WHERE k1 = 1; +-- Updating non-key columns of an index should only require an UPDATE +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v3 = v3 + 1 WHERE k1 = 1; +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v3 = v3 + 1, v4 = v4 + 1 WHERE k1 = 1; +-- Updating a mix of key and non-key columns of an index should require a DELETE + INSERT +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v2 = v2 + 10, v3 = v3 + 1 WHERE k1 = 1; + +-- Updating the primary key columns should require a DELETE + INSERT on the main table +-- but only an UPDATE on a non-unique index. 
+EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET k1 = k1 + 10, v3 = v3 + 1 WHERE k1 = 1; +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET k2 = k2 + 10, v3 = v3 + 1 WHERE k1 = 11; +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET k1 = 12, k2 = 22, v3 = v3 + 1 WHERE k1 < 3; +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET k1 = k1 + 10, k2 = k2 + 10, v2 = v2 + 10 WHERE k2 = 3; +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET k1 = k1 + 10, k2 = k2 + 10, v2 = v2 + 10, v3 = v3 + 1 WHERE k2 = 4; + +/*+ SeqScan(t_simple) */ SELECT * FROM t_simple WHERE k1 > 10 ORDER BY (k1, k2); +/*+ IndexOnlyScan(t_simple simple_v2_hash_v3) */ SELECT v2, v3 FROM t_simple WHERE v2 <= 5 ORDER BY (v2, v3); +/*+ IndexOnlyScan(t_simple simple_v2_range_v3) */ SELECT v2, v3 FROM t_simple WHERE v2 <= 5 ORDER BY (v2, v3); +/*+ IndexScan(simple_v2_hash_v3) */ SELECT * FROM t_simple WHERE v2 <= 5 ORDER BY (v2, v3); +/*+ IndexScan(simple_v2_range_v3) */ SELECT * FROM t_simple WHERE v2 <= 5 ORDER BY (v2, v3); + +DROP INDEX simple_v2_hash_v3; +DROP INDEX simple_v2_range_v3; + +-- +-- Tests to validate multi-column index update behavior +-- +TRUNCATE t_simple; +INSERT INTO t_simple (SELECT i, i, i, i, i, i FROM generate_series(1, 10) AS i); +CREATE INDEX NONCONCURRENTLY simple_v1_v2_v3 ON t_simple ((v1, v2) HASH) INCLUDE (v3); +-- Updating any of the key columns of the non-unique index should require a DELETE + INSERT +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v1 = v1 + 10, v2 = v2 + 10 WHERE k1 = 1; +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v1 = v1 + 10, v3 = v3 + 1 WHERE k1 = 2; +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v2 = v1 + 10, v3 = v3 + 1 WHERE k1 = 3; +-- Similaryly, updating the primary key should require a DELETE + INSERT +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET k2 = k2 + 10, v3 = v3 + 1 WHERE k1 = 4; +-- Updating non-key columns of the index should only require an UPDATE +EXPLAIN (ANALYZE, DIST, COSTS 
OFF) UPDATE t_simple SET v3 = v3 + 1, v4 = v4 + 1 WHERE k1 = 5; + +/*+ IndexOnlyScan(t_simple simple_v1_v2_v3) */ SELECT v1, v2, v3 FROM t_simple WHERE v2 <= 5 ORDER BY (v1, v2); +/*+ IndexScan(simple_v1_v2_v3) */ SELECT * FROM t_simple WHERE k1 <= 5 ORDER BY (v1, v2); + +DROP INDEX simple_v1_v2_v3; + +-- Create an index that has columns out of order and repeat the test above. +CREATE INDEX NONCONCURRENTLY out_of_order_v1_v2_v3 ON t_simple ((v3, v1) HASH) INCLUDE (v2); +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v1 = v1 + 10, v2 = v2 + 10 WHERE k1 = 1; +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v1 = v1 + 10, v3 = v3 + 1 WHERE k1 = 2; +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v2 = v1 + 10, v3 = v3 + 1 WHERE k1 = 3; +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET k2 = k2 + 10, v2 = v2 + 1 WHERE k1 = 1; +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v2 = v2 + 1, v4 = v4 + 1 WHERE k1 = 1; + +/*+ IndexOnlyScan(t_simple out_of_order_v1_v2_v3) */ SELECT v1, v2, v3 FROM t_simple WHERE v2 <= 5 ORDER BY (v1, v2); +/*+ IndexScan(out_of_order_v1_v2_v3) */ SELECT * FROM t_simple WHERE k1 <= 5 ORDER BY (v1, v2); + +DROP INDEX out_of_order_v1_v2_v3; + +-- Range indexes + +-- +-- Tests to validate index update behavior when the index contains NULL values +-- +CREATE INDEX NONCONCURRENTLY simple_v1_v2_v3 ON t_simple (v1, v2) INCLUDE (v3); +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v2 = NULL WHERE k1 = 1; +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v2 = NULL, v3 = NULL WHERE k1 = 2; +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v3 = NULL, v4 = NULL WHERE k1 = 3; +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v1 = NULL WHERE k1 = 4; +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v1 = NULL, v2 = NULL, v3 = NULL WHERE k1 = 5; + +/*+ IndexOnlyScan(t_simple simple_v1_v2_v3) */ SELECT v1, v2, v3 FROM t_simple WHERE v1 <= 25 OR v1 IS NULL ORDER BY (v1, v2); +/*+ 
IndexScan(simple_v1_v2_v3) */ SELECT * FROM t_simple WHERE k1 <= 5 ORDER BY (v1, v2); + +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v2 = 1 WHERE k1 = 1; +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v2 = 2, v3 = k1 WHERE k1 = 2; +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v4 = k1 - k2 + v1, v3 = 3 WHERE k1 = 3; +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v1 = 4 WHERE k1 = 4; +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v1 = 5, v2 = 5, v3 = 5 WHERE k1 = 5; + +/*+ IndexOnlyScan(t_simple simple_v1_v2_v3) */ SELECT v1, v2, v3 FROM t_simple WHERE v1 <= 25 OR v1 IS NULL ORDER BY (v1, v2); +/*+ IndexScan(simple_v1_v2_v3) */ SELECT * FROM t_simple WHERE k1 <= 5 ORDER BY (v1, v2); + +DROP INDEX simple_v1_v2_v3; + +-- Create a unique index with nullable values and repeat the tests above. +CREATE INDEX NONCONCURRENTLY simple_unique_v1_v2_v3 ON t_simple (v1, v2) INCLUDE (v3); +-- Setting any of the primary key columns to NULL should be done via a single UPDATE +-- Setting any of the secondary index key columns to NULL will still require the delete + update +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v2 = NULL WHERE k1 = 1; +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v2 = NULL, v3 = NULL WHERE k1 = 2; +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v3 = NULL, v4 = NULL WHERE k1 = 3; +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v1 = NULL WHERE k1 = 4; +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v1 = NULL, v2 = NULL, v3 = NULL WHERE k1 = 5; + +/*+ IndexOnlyScan(t_simple simple_unique_v1_v2_v3) */ SELECT v1, v2, v3 FROM t_simple WHERE v1 <= 25 OR v1 IS NULL ORDER BY (v1, v2); +/*+ IndexScan(simple_unique_v1_v2_v3) */ SELECT * FROM t_simple WHERE k1 <= 5 ORDER BY (v1, v2); + +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v2 = 1 WHERE k1 = 1; +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v2 = 2, v3 = k1 WHERE k1 = 2; +EXPLAIN (ANALYZE, DIST, COSTS OFF) 
UPDATE t_simple SET v4 = k1 - k2 + v1, v3 = 3 WHERE k1 = 3; +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v1 = 4 WHERE k1 = 4; +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v1 = 5, v2 = 5, v3 = 5 WHERE k1 = 5; + +/*+ IndexOnlyScan(t_simple simple_unique_v1_v2_v3) */ SELECT v1, v2, v3 FROM t_simple WHERE v1 <= 25 OR v1 IS NULL ORDER BY (v1, v2); +/*+ IndexScan(simple_unique_v1_v2_v3) */ SELECT * FROM t_simple WHERE k1 <= 5 ORDER BY (v1, v2); + +DROP INDEX simple_unique_v1_v2_v3; + +-- +-- Tests to validate index update behavior for partial indexes +-- +TRUNCATE t_simple; +INSERT INTO t_simple (SELECT i, i, i, i, i, i FROM generate_series(1, 10) AS i); +CREATE INDEX NONCONCURRENTLY simple_partial_or ON t_simple (v1, v2) INCLUDE (v3) WHERE v1 < 5 OR v2 < 10; +CREATE INDEX NONCONCURRENTLY simple_partial_and ON t_simple (v1, v2) INCLUDE (v3) WHERE v1 < 5 AND v2 < 10; + +-- The row must be deleted from both indexes +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v1 = 11, v2 = 10 WHERE k1 = 1; +-- The rows must be deleted from the AND index but not the OR index +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v1 = 12, v2 = 8 WHERE k1 = 2; +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v2 = 15 WHERE k1 = 3; +-- Modifying the INCLUDE columns should have no impact on the index +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v3 = v3 + 10 WHERE k1 = 6; +-- The row must be inserted into both indexes +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v1 = 5, v2 = -5 WHERE k1 = 1; +-- The row must be inserted into one of the indexes but not the other +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v1 = 4 WHERE k1 = 2; +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v1 = 4, v2 = 8, v3 = 100 WHERE k1 = 7; +-- Modifying the primary key columns should neither delete nor insert into the index +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET k2 = k1 + 10, v3 = v3 + 1 WHERE k1 = 8; + +/*+ 
IndexOnlyScan(t_simple simple_partial_or) */ SELECT v1, v2, v3 FROM t_simple WHERE v1 < 5 OR v2 < 10 ORDER BY (v1, v2); +/*+ IndexScan(simple_unique_v1_v2_v3) */ SELECT * FROM t_simple WHERE v1 < 5 OR v2 < 10 ORDER BY (v1, v2); +/*+ IndexOnlyScan(t_simple simple_partial_and) */ SELECT v1, v2, v3 FROM t_simple WHERE v1 < 5 AND v2 < 10 ORDER BY (v1, v2); +/*+ IndexScan(simple_unique_v1_v2_v3) */ SELECT * FROM t_simple WHERE v1 < 5 AND v2 < 10 ORDER BY (v1, v2); +SELECT * FROM t_simple; + +DROP INDEX simple_partial_or; +DROP INDEX simple_partial_and; + +-- +-- Tests to validate index update behavior for expression indexes +-- +TRUNCATE t_simple; +INSERT INTO t_simple (SELECT i, i, i, i, i, i FROM generate_series(1, 10) AS i); +CREATE UNIQUE INDEX NONCONCURRENTLY simple_expr ON t_simple ((v1 + 10)) INCLUDE (v3); + +-- Updating any of the columns making up the expression should require a DELETE + INSERT +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v1 = 11 WHERE k1 = 1; +-- Any other column update should only require an UPDATE +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET k2 = k2 + 1 WHERE k1 = 2; +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET k1 = k1 + 10 WHERE k1 = 3; +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET k2 = k2 + 5, v3 = v3 + 5 WHERE k1 = 4; + +/*+ IndexScan(simple_expr) */ SELECT *, v1 + 10 FROM t_simple WHERE v1 + 10 IN (21, 12, 13, 14, 15, 16, 17, 18, 19, 20) ORDER BY (v1, v2); +SELECT * FROM t_simple; + +-- CREATE INDEX NONCONCURRENTLY complex_expr_v2 ON t_simple ((v1 * v2), v3) INCLUDE (v4); + +-- +-- Tests to validate multiple include columns +-- +TRUNCATE t_simple; +INSERT INTO t_simple (SELECT i, i, i, i, i, i FROM generate_series(1, 10) AS i); +CREATE UNIQUE INDEX NONCONCURRENTLY multi_include ON t_simple (v1, v2) INCLUDE (v4, v3, v2); + +-- Updating any of the key columns should require a DELETE + INSERT +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v1 = 11 WHERE k1 = 1; +EXPLAIN (ANALYZE, DIST, 
COSTS OFF) UPDATE t_simple SET v2 = v2 + 1 WHERE k1 = 2; +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v1 = v2 + 10, v2 = v1 + 1 WHERE k1 = 3; +-- Any other column update should only require an UPDATE +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v3 = v3 + 1 WHERE k1 = 4; +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v3 = v4, v4 = v3 WHERE k1 = 5; +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET v4 = v2 + 1 WHERE k1 = 6; +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET k1 = k1 + 10, k2 = k2 + 1 WHERE k1 = 7; +EXPLAIN (ANALYZE, DIST, COSTS OFF) UPDATE t_simple SET k1 = k1 + 10, k2 = k2 + 1 WHERE k1 = 8; + +EXPLAIN (VERBOSE) /*+ IndexOnlyScan(t_simple multi_include) */ SELECT v1, v2, v3, v4 FROM t_simple ORDER BY (v1, v2); +/*+ IndexOnlyScan(t_simple multi_include) */ SELECT v1, v2, v3, v4 FROM t_simple ORDER BY (v1, v2); +EXPLAIN (VERBOSE) /*+ IndexScan(multi_include) */ SELECT * FROM t_simple ORDER BY (v1, v2); +/*+ IndexScan(multi_include) */ SELECT * FROM t_simple ORDER BY (v1, v2); +SELECT * FROM t_simple; diff --git a/src/postgres/src/test/regress/yb_update_optimized_schedule b/src/postgres/src/test/regress/yb_update_optimized_schedule index ac68e229529d..878b29cdcfcd 100644 --- a/src/postgres/src/test/regress/yb_update_optimized_schedule +++ b/src/postgres/src/test/regress/yb_update_optimized_schedule @@ -6,3 +6,4 @@ test: yb_update_optimize_base test: yb_update_optimize_indices test: yb_update_optimize_triggers +test: yb_update_optimize_index_updates diff --git a/src/postgres/third-party-extensions/pgvector/src/ybvector/ybvector.c b/src/postgres/third-party-extensions/pgvector/src/ybvector/ybvector.c index 89db278a8617..d1113d5821c9 100644 --- a/src/postgres/third-party-extensions/pgvector/src/ybvector/ybvector.c +++ b/src/postgres/third-party-extensions/pgvector/src/ybvector/ybvector.c @@ -60,6 +60,7 @@ makeBaseYbVectorHandler() amroutine->ampredlocks = true; /* TODO(tanuj): check what this is */ 
amroutine->amcanparallel = false; amroutine->amcaninclude = false; + amroutine->ybamcanupdatetupleinplace = false; amroutine->amkeytype = InvalidOid; amroutine->ambuild = ybvectorbuild; @@ -85,6 +86,7 @@ makeBaseYbVectorHandler() amroutine->yb_amisforybrelation = true; amroutine->yb_aminsert = ybvectorinsert; amroutine->yb_amdelete = ybvectordelete; + amroutine->yb_amupdate = NULL; amroutine->yb_ambackfill = ybvectorbackfill; amroutine->yb_ammightrecheck = ybvectormightrecheck; amroutine->yb_ambindschema = NULL; diff --git a/src/postgres/third-party-extensions/pgvector/src/ybvector/ybvector.h b/src/postgres/third-party-extensions/pgvector/src/ybvector/ybvector.h index 6eee4a9e5993..1520e18e5a1b 100644 --- a/src/postgres/third-party-extensions/pgvector/src/ybvector/ybvector.h +++ b/src/postgres/third-party-extensions/pgvector/src/ybvector/ybvector.h @@ -83,6 +83,9 @@ extern bool ybvectorinsert(Relation rel, Datum *values, bool *isnull, extern void ybvectordelete(Relation rel, Datum *values, bool *isnull, Datum ybctid, Relation heapRel, struct IndexInfo *indexInfo); +extern void ybvectorinupdate(Relation index, Datum *values, bool *isnull, + Datum oldYbctid, Datum newYbctid, Relation heap, + struct IndexInfo *indexInfo); extern IndexBuildResult *ybvectorbackfill(Relation heap, Relation index, struct IndexInfo *indexInfo, struct YbBackfillInfo *bfinfo, diff --git a/src/postgres/third-party-extensions/pgvector/src/ybvector/ybvectorwrite.c b/src/postgres/third-party-extensions/pgvector/src/ybvector/ybvectorwrite.c index 0d40b4082fa5..ad228df44202 100644 --- a/src/postgres/third-party-extensions/pgvector/src/ybvector/ybvectorwrite.c +++ b/src/postgres/third-party-extensions/pgvector/src/ybvector/ybvectorwrite.c @@ -412,6 +412,13 @@ ybvectordelete(Relation index, Datum *values, bool *isnull, Datum ybctid, false /* isinsert */); } +void +ybvectorinupdate(Relation index, Datum *values, bool *isnull, Datum oldYbctid, + Datum newYbctid, Relation heap, struct IndexInfo 
*indexInfo) +{ + elog(ERROR, "Unexpected in-place update of ybvector index requested"); +} + IndexBuildResult * ybvectorbackfill(Relation heap, Relation index, struct IndexInfo *indexInfo, struct YbBackfillInfo *bfinfo, struct YbPgExecOutParam *bfresult) diff --git a/src/yb/docdb/pgsql_operation.cc b/src/yb/docdb/pgsql_operation.cc index 15f1a9a87f2e..7ae862f6e24a 100644 --- a/src/yb/docdb/pgsql_operation.cc +++ b/src/yb/docdb/pgsql_operation.cc @@ -1217,7 +1217,18 @@ Status PgsqlWriteOperation::ApplyUpdate(const DocOperationApplyData& data) { // skipped is set to false if this operation produces some data to write. bool skipped = true; - if (request_.has_ybctid_column_value()) { + // This function is invoked by three different callers: + // 1. Main table updates: requests have the YBCTID column field populated. + // 2. Secondary index updates: requests do not have the YBCTID column field populated. + // The tuple identifier is constructed from partition and range key columns. + // 3. Sequence updates: requests are similar to secondary index updates. These updates are sent + // by calling pggate directly, without going through the PostgreSQL layer. + // Since we cannot distinguish between (2) and (3), we make use of the table ID to determine the + // type of UPDATE to be performed. Sequence updates are always on the PG sequences data table. 
+ bool is_sequence_update = + GetPgsqlTableId(kPgSequencesDataDatabaseOid, kPgSequencesDataTableOid) == request_.table_id(); + + if (!is_sequence_update) { const auto& schema = doc_read_context_->schema(); ExpressionHelper expression_helper; RETURN_NOT_OK(expression_helper.Init(schema, projection(), request_, table_row)); From 655649886da1f9877ec44b058f4689bbe2b5b2d2 Mon Sep 17 00:00:00 2001 From: jhe Date: Fri, 13 Sep 2024 01:44:32 -0700 Subject: [PATCH 38/75] [#23897] xClusterDDLRepl: Support create table with partition by primary key Summary: When creating a table with a partition by the primary key, the primary index created is classified as RELKIND_PARTITIONED_INDEX instead of RELKIND_INDEX. Adding support in the ddlrepl extension to handle this type of index as well - since this index is the same table as the parent table, we don't need to search for / replicate this index. Fixes #23897. Jira: DB-12801 Test Plan: ``` ybd --java-test "org.yb.pgsql.TestPgRegressYbExtensionsYbXclusterDdlReplication" ``` Reviewers: xCluster, yyan Reviewed By: yyan Subscribers: ybase, yql Differential Revision: https://phorge.dev.yugabyte.com/D38033 --- .../expected/create_drop_index.out | 20 ++++++++-------- .../expected/create_drop_table.out | 23 +++++++++++++++---- .../source_ddl_end_handler.c | 4 +++- .../sql/create_drop_table.sql | 6 +++++ 4 files changed, 37 insertions(+), 16 deletions(-) diff --git a/src/postgres/yb-extensions/yb_xcluster_ddl_replication/expected/create_drop_index.out b/src/postgres/yb-extensions/yb_xcluster_ddl_replication/expected/create_drop_index.out index 63311ae83163..937a0b818a5d 100644 --- a/src/postgres/yb-extensions/yb_xcluster_ddl_replication/expected/create_drop_index.out +++ b/src/postgres/yb-extensions/yb_xcluster_ddl_replication/expected/create_drop_index.out @@ -28,11 +28,11 @@ SET ROLE NONE; SELECT yb_data FROM yb_xcluster_ddl_replication.ddl_queue ORDER BY start_time; yb_data 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - {"user": "yugabyte", "query": "CREATE TABLE foo(i int PRIMARY KEY, a int, b text, c int);", "schema": "create_index", "version": 1, "command_tag": "CREATE TABLE", "new_rel_map": [{"rel_name": "foo", "relfile_oid": 16451}]} - {"user": "yugabyte", "query": "CREATE INDEX foo_idx_simple ON foo(a);", "schema": "create_index", "version": 1, "command_tag": "CREATE INDEX", "new_rel_map": [{"rel_name": "foo_idx_simple", "relfile_oid": 16456}]} - {"user": "yugabyte", "query": "CREATE UNIQUE INDEX foo_idx_unique ON foo(b);", "schema": "create_index", "version": 1, "command_tag": "CREATE INDEX", "new_rel_map": [{"rel_name": "foo_idx_unique", "relfile_oid": 16457}]} - {"user": "yugabyte", "query": "CREATE INDEX foo_idx_filtered ON foo(c ASC, a) WHERE a > c;", "schema": "create_index", "version": 1, "command_tag": "CREATE INDEX", "new_rel_map": [{"rel_name": "foo_idx_filtered", "relfile_oid": 16458}]} - {"user": "new_role", "query": "CREATE INDEX foo_idx_include ON foo(lower(b)) INCLUDE (a) SPLIT INTO 2 TABLETS;", "schema": "create_index", "version": 1, "command_tag": "CREATE INDEX", "new_rel_map": [{"rel_name": "foo_idx_include", "relfile_oid": 16459}]} + {"user": "yugabyte", "query": "CREATE TABLE foo(i int PRIMARY KEY, a int, b text, c int);", "schema": "create_index", "version": 1, "command_tag": "CREATE TABLE", "new_rel_map": [{"rel_name": "foo", "relfile_oid": 16459}]} + {"user": "yugabyte", "query": "CREATE INDEX foo_idx_simple ON foo(a);", "schema": "create_index", "version": 1, "command_tag": "CREATE INDEX", "new_rel_map": [{"rel_name": "foo_idx_simple", "relfile_oid": 16464}]} + {"user": "yugabyte", "query": "CREATE UNIQUE INDEX foo_idx_unique ON foo(b);", "schema": "create_index", "version": 1, "command_tag": 
"CREATE INDEX", "new_rel_map": [{"rel_name": "foo_idx_unique", "relfile_oid": 16465}]} + {"user": "yugabyte", "query": "CREATE INDEX foo_idx_filtered ON foo(c ASC, a) WHERE a > c;", "schema": "create_index", "version": 1, "command_tag": "CREATE INDEX", "new_rel_map": [{"rel_name": "foo_idx_filtered", "relfile_oid": 16466}]} + {"user": "new_role", "query": "CREATE INDEX foo_idx_include ON foo(lower(b)) INCLUDE (a) SPLIT INTO 2 TABLETS;", "schema": "create_index", "version": 1, "command_tag": "CREATE INDEX", "new_rel_map": [{"rel_name": "foo_idx_include", "relfile_oid": 16467}]} (5 rows) SELECT * FROM yb_xcluster_ddl_replication.replicated_ddls ORDER BY start_time; @@ -54,11 +54,11 @@ DROP TABLE foo; SELECT yb_data FROM yb_xcluster_ddl_replication.ddl_queue ORDER BY start_time; yb_data ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - {"user": "yugabyte", "query": "CREATE TABLE foo(i int PRIMARY KEY, a int, b text, c int);", "schema": "create_index", "version": 1, "command_tag": "CREATE TABLE", "new_rel_map": [{"rel_name": "foo", "relfile_oid": 16451}]} - {"user": "yugabyte", "query": "CREATE INDEX foo_idx_simple ON foo(a);", "schema": "create_index", "version": 1, "command_tag": "CREATE INDEX", "new_rel_map": [{"rel_name": "foo_idx_simple", "relfile_oid": 16456}]} - {"user": "yugabyte", "query": "CREATE UNIQUE INDEX foo_idx_unique ON foo(b);", "schema": "create_index", "version": 1, "command_tag": "CREATE INDEX", "new_rel_map": [{"rel_name": "foo_idx_unique", "relfile_oid": 16457}]} - {"user": "yugabyte", "query": "CREATE INDEX foo_idx_filtered ON foo(c ASC, a) WHERE a > c;", "schema": "create_index", "version": 1, "command_tag": "CREATE INDEX", "new_rel_map": [{"rel_name": "foo_idx_filtered", "relfile_oid": 16458}]} - {"user": "new_role", "query": "CREATE 
INDEX foo_idx_include ON foo(lower(b)) INCLUDE (a) SPLIT INTO 2 TABLETS;", "schema": "create_index", "version": 1, "command_tag": "CREATE INDEX", "new_rel_map": [{"rel_name": "foo_idx_include", "relfile_oid": 16459}]} + {"user": "yugabyte", "query": "CREATE TABLE foo(i int PRIMARY KEY, a int, b text, c int);", "schema": "create_index", "version": 1, "command_tag": "CREATE TABLE", "new_rel_map": [{"rel_name": "foo", "relfile_oid": 16459}]} + {"user": "yugabyte", "query": "CREATE INDEX foo_idx_simple ON foo(a);", "schema": "create_index", "version": 1, "command_tag": "CREATE INDEX", "new_rel_map": [{"rel_name": "foo_idx_simple", "relfile_oid": 16464}]} + {"user": "yugabyte", "query": "CREATE UNIQUE INDEX foo_idx_unique ON foo(b);", "schema": "create_index", "version": 1, "command_tag": "CREATE INDEX", "new_rel_map": [{"rel_name": "foo_idx_unique", "relfile_oid": 16465}]} + {"user": "yugabyte", "query": "CREATE INDEX foo_idx_filtered ON foo(c ASC, a) WHERE a > c;", "schema": "create_index", "version": 1, "command_tag": "CREATE INDEX", "new_rel_map": [{"rel_name": "foo_idx_filtered", "relfile_oid": 16466}]} + {"user": "new_role", "query": "CREATE INDEX foo_idx_include ON foo(lower(b)) INCLUDE (a) SPLIT INTO 2 TABLETS;", "schema": "create_index", "version": 1, "command_tag": "CREATE INDEX", "new_rel_map": [{"rel_name": "foo_idx_include", "relfile_oid": 16467}]} {"user": "yugabyte", "query": "DROP INDEX foo_idx_unique;", "schema": "create_index", "version": 1, "command_tag": "DROP INDEX"} {"user": "yugabyte", "query": "DROP INDEX foo_idx_filtered;", "schema": "create_index", "version": 1, "command_tag": "DROP INDEX"} {"user": "yugabyte", "query": "DROP TABLE foo;", "schema": "create_index", "version": 1, "command_tag": "DROP TABLE"} diff --git a/src/postgres/yb-extensions/yb_xcluster_ddl_replication/expected/create_drop_table.out b/src/postgres/yb-extensions/yb_xcluster_ddl_replication/expected/create_drop_table.out index a918473fd897..33a29b58ca20 100644 --- 
a/src/postgres/yb-extensions/yb_xcluster_ddl_replication/expected/create_drop_table.out +++ b/src/postgres/yb-extensions/yb_xcluster_ddl_replication/expected/create_drop_table.out @@ -36,6 +36,9 @@ SELECT * FROM yb_xcluster_ddl_replication.replicated_ddls ORDER BY start_time; 3 | 1 | {"query": "CREATE TABLE unique_foo(i int PRIMARY KEY, u text UNIQUE);"} (3 rows) +-- Test tables partitioned by their primary key or a column. +CREATE TABLE foo_partitioned_by_pkey(id int, PRIMARY KEY (id)) PARTITION BY RANGE (id); +CREATE TABLE foo_partitioned_by_col(id int) PARTITION BY RANGE (id); -- Now test dropping these tables. DROP TABLE foo; -- Check with manual replication flags enabled, ddl string is captured with flag. @@ -44,6 +47,8 @@ DROP TABLE manual_foo; SET yb_xcluster_ddl_replication.enable_manual_ddl_replication = 0; DROP TABLE extra_foo; DROP TABLE unique_foo; +DROP TABLE foo_partitioned_by_pkey; +DROP TABLE foo_partitioned_by_col; SELECT yb_data FROM yb_xcluster_ddl_replication.ddl_queue ORDER BY start_time; yb_data ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- @@ -51,11 +56,15 @@ SELECT yb_data FROM yb_xcluster_ddl_replication.ddl_queue ORDER BY start_time; {"user": "yugabyte", "query": "CREATE TABLE manual_foo(i int PRIMARY KEY);", "schema": "public", "version": 1, "command_tag": "CREATE TABLE", "manual_replication": true} {"user": "yugabyte", "query": "CREATE TABLE extra_foo(i int PRIMARY KEY) WITH (COLOCATION = false) SPLIT INTO 1 TABLETS;", "schema": "public", "version": 1, "command_tag": "CREATE TABLE", "new_rel_map": [{"rel_name": "extra_foo", "relfile_oid": 16422}]} {"user": "yugabyte", "query": "CREATE TABLE unique_foo(i int PRIMARY KEY, u text UNIQUE);", "schema": "public", "version": 1, "command_tag": "CREATE TABLE", 
"new_rel_map": [{"rel_name": "unique_foo", "relfile_oid": 16427}, {"rel_name": "unique_foo_u_key", "relfile_oid": 16432}]} + {"user": "yugabyte", "query": "CREATE TABLE foo_partitioned_by_pkey(id int, PRIMARY KEY (id)) PARTITION BY RANGE (id);", "schema": "public", "version": 1, "command_tag": "CREATE TABLE", "new_rel_map": [{"rel_name": "foo_partitioned_by_pkey", "relfile_oid": 16434}]} + {"user": "yugabyte", "query": "CREATE TABLE foo_partitioned_by_col(id int) PARTITION BY RANGE (id);", "schema": "public", "version": 1, "command_tag": "CREATE TABLE", "new_rel_map": [{"rel_name": "foo_partitioned_by_col", "relfile_oid": 16439}]} {"user": "yugabyte", "query": "DROP TABLE foo;", "schema": "public", "version": 1, "command_tag": "DROP TABLE"} {"user": "yugabyte", "query": "DROP TABLE manual_foo;", "schema": "public", "version": 1, "command_tag": "DROP TABLE", "manual_replication": true} {"user": "yugabyte", "query": "DROP TABLE extra_foo;", "schema": "public", "version": 1, "command_tag": "DROP TABLE"} {"user": "yugabyte", "query": "DROP TABLE unique_foo;", "schema": "public", "version": 1, "command_tag": "DROP TABLE"} -(8 rows) + {"user": "yugabyte", "query": "DROP TABLE foo_partitioned_by_pkey;", "schema": "public", "version": 1, "command_tag": "DROP TABLE"} + {"user": "yugabyte", "query": "DROP TABLE foo_partitioned_by_col;", "schema": "public", "version": 1, "command_tag": "DROP TABLE"} +(12 rows) SELECT * FROM yb_xcluster_ddl_replication.replicated_ddls ORDER BY start_time; start_time | query_id | yb_data @@ -63,10 +72,14 @@ SELECT * FROM yb_xcluster_ddl_replication.replicated_ddls ORDER BY start_time; 1 | 1 | {"query": "CREATE TABLE foo(i int PRIMARY KEY);"} 2 | 1 | {"query": "CREATE TABLE extra_foo(i int PRIMARY KEY) WITH (COLOCATION = false) SPLIT INTO 1 TABLETS;"} 3 | 1 | {"query": "CREATE TABLE unique_foo(i int PRIMARY KEY, u text UNIQUE);"} - 4 | 1 | {"query": "DROP TABLE foo;"} - 5 | 1 | {"query": "DROP TABLE extra_foo;"} - 6 | 1 | {"query": "DROP TABLE 
unique_foo;"} -(6 rows) + 4 | 1 | {"query": "CREATE TABLE foo_partitioned_by_pkey(id int, PRIMARY KEY (id)) PARTITION BY RANGE (id);"} + 5 | 1 | {"query": "CREATE TABLE foo_partitioned_by_col(id int) PARTITION BY RANGE (id);"} + 6 | 1 | {"query": "DROP TABLE foo;"} + 7 | 1 | {"query": "DROP TABLE extra_foo;"} + 8 | 1 | {"query": "DROP TABLE unique_foo;"} + 9 | 1 | {"query": "DROP TABLE foo_partitioned_by_pkey;"} + 10 | 1 | {"query": "DROP TABLE foo_partitioned_by_col;"} +(10 rows) -- Test mix of temp and regular tables. SET yb_xcluster_ddl_replication.replication_role = SOURCE; diff --git a/src/postgres/yb-extensions/yb_xcluster_ddl_replication/source_ddl_end_handler.c b/src/postgres/yb-extensions/yb_xcluster_ddl_replication/source_ddl_end_handler.c index 49a876d25787..02993dded2ca 100644 --- a/src/postgres/yb-extensions/yb_xcluster_ddl_replication/source_ddl_end_handler.c +++ b/src/postgres/yb-extensions/yb_xcluster_ddl_replication/source_ddl_end_handler.c @@ -88,7 +88,9 @@ ShouldReplicateCreateRelation(Oid rel_oid, List **new_rel_list) // Ignore temporary tables and primary indexes (same as main table). 
if (!IsYBBackedRelation(rel) || - (rel->rd_rel->relkind == RELKIND_INDEX && rel->rd_index->indisprimary)) + ((rel->rd_rel->relkind == RELKIND_INDEX || + rel->rd_rel->relkind == RELKIND_PARTITIONED_INDEX) && + rel->rd_index->indisprimary)) { RelationClose(rel); return false; diff --git a/src/postgres/yb-extensions/yb_xcluster_ddl_replication/sql/create_drop_table.sql b/src/postgres/yb-extensions/yb_xcluster_ddl_replication/sql/create_drop_table.sql index 6bd5f55bf7c8..66d53b232cf2 100644 --- a/src/postgres/yb-extensions/yb_xcluster_ddl_replication/sql/create_drop_table.sql +++ b/src/postgres/yb-extensions/yb_xcluster_ddl_replication/sql/create_drop_table.sql @@ -25,6 +25,10 @@ CREATE TABLE unique_foo(i int PRIMARY KEY, u text UNIQUE); SELECT yb_data FROM yb_xcluster_ddl_replication.ddl_queue ORDER BY start_time; SELECT * FROM yb_xcluster_ddl_replication.replicated_ddls ORDER BY start_time; +-- Test tables partitioned by their primary key or a column. +CREATE TABLE foo_partitioned_by_pkey(id int, PRIMARY KEY (id)) PARTITION BY RANGE (id); +CREATE TABLE foo_partitioned_by_col(id int) PARTITION BY RANGE (id); + -- Now test dropping these tables. DROP TABLE foo; @@ -35,6 +39,8 @@ SET yb_xcluster_ddl_replication.enable_manual_ddl_replication = 0; DROP TABLE extra_foo; DROP TABLE unique_foo; +DROP TABLE foo_partitioned_by_pkey; +DROP TABLE foo_partitioned_by_col; SELECT yb_data FROM yb_xcluster_ddl_replication.ddl_queue ORDER BY start_time; SELECT * FROM yb_xcluster_ddl_replication.replicated_ddls ORDER BY start_time; From e2b1d28364e2eea6b6545ded5715817a8e555749 Mon Sep 17 00:00:00 2001 From: Nikhil Chandrappa Date: Mon, 16 Sep 2024 18:02:36 +0000 Subject: [PATCH 39/75] [#23363] Changing the default gflags of YSQL memory configuration. Summary: Updating the default gflags of yugabyted to use use_memory_defaults_optimized_for_ysql to use memory configuration optimised for YSQL workloads. 
Test Plan: Manual Tests Reviewers: sgarg-yb, djiang Reviewed By: djiang Subscribers: djiang, yugabyted-dev Differential Revision: https://phorge.dev.yugabyte.com/D38085 --- bin/yugabyted | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/bin/yugabyted b/bin/yugabyted index 02470522e980..bd58bb306579 100755 --- a/bin/yugabyted +++ b/bin/yugabyted @@ -3213,6 +3213,7 @@ class ControlScript(object): "--placement_cloud={}".format(self.configs.saved_data.get("cloud_provider")), "--placement_region={}".format(self.configs.saved_data.get("cloud_region")), "--placement_zone={}".format(self.configs.saved_data.get("cloud_zone")), + "--use_memory_defaults_optimized_for_ysql=true", ] if fault_tolerance == "region": @@ -3244,8 +3245,9 @@ class ControlScript(object): os.path.join(self.configs.saved_data.get("data_dir"), "master-info")), "--master_enable_metrics_snapshotter=true", "--webserver_port={}".format(self.configs.saved_data.get("master_webserver_port")), - "--default_memory_limit_to_ram_ratio=0.35", "--instance_uuid_override={}".format(self.configs.saved_data.get("master_uuid")), + "--enforce_tablet_replica_limits=true", + "--split_respects_tablet_replica_limits=true", ] if self.configs.saved_data.get("secure"): @@ -3340,7 +3342,6 @@ class ControlScript(object): "--tserver_enable_metrics_snapshotter=true", "--metrics_snapshotter_interval_ms=11000", "--webserver_port={}".format(self.configs.saved_data.get("tserver_webserver_port")), - "--default_memory_limit_to_ram_ratio=0.6", "--instance_uuid_override={}".format(self.configs.saved_data.get("tserver_uuid")), "--start_redis_proxy=false", "--placement_uuid={}".format(self.configs.saved_data.get("placement_uuid")), From e717f43a2981b8d236e1c15a993115fdcd58cf4f Mon Sep 17 00:00:00 2001 From: Basava Date: Mon, 16 Sep 2024 13:34:47 -0500 Subject: [PATCH 40/75] [#23925] DocDB: Address recent regression of test PgWaitQueuesTest.MultiTabletFairness Summary: `PgWaitQueuesTest.MultiTabletFairness` has some timing 
based assertions that started to fail recently. In particular, each thread issues a `select for update...` and waits for all other threads to reach the same state. It seems like enabling shared memory in release mode has caused the flakiness in this test (only on mac release builds), which may hint at `select for update...` taking longer after the change. But a potential regression affecting latencies due to that change is being looked at separately and it shouldn't have anything to do with this test. Here's a snippet of the error we see on mac release builds. ``` ../../src/yb/yql/pgwrapper/pg_wait_on_conflict-test.cc:1317 Value of: queued_waiters.WaitFor(10s * kTimeMultiplier) Actual: false Expected: true ``` This diff addresses the test-only issue by executing all reads with explicit lock requests before the for loop with timing based assertions. Jira: DB-12827 Test Plan: ./yb_build.sh release --cxx-test pgwrapper_pg_wait_on_conflict-test --gtest_filter PgWaitQueuesTest.MultiTabletFairness -n 50 --tp 1 Reviewers: rthallam, pjain, patnaik.balivada Reviewed By: patnaik.balivada Subscribers: ybase, yql Differential Revision: https://phorge.dev.yugabyte.com/D38050 --- src/yb/yql/pgwrapper/pg_wait_on_conflict-test.cc | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/yb/yql/pgwrapper/pg_wait_on_conflict-test.cc b/src/yb/yql/pgwrapper/pg_wait_on_conflict-test.cc index b611146aed6d..a6637c82892a 100644 --- a/src/yb/yql/pgwrapper/pg_wait_on_conflict-test.cc +++ b/src/yb/yql/pgwrapper/pg_wait_on_conflict-test.cc @@ -1291,6 +1291,12 @@ void PgWaitQueuesTest::TestMultiTabletFairness() const { update_conns.reserve(kNumUpdateConns); for (int i = 0; i < kNumUpdateConns; ++i) { update_conns.push_back(ASSERT_RESULT(Connect())); + auto& conn = update_conns.back(); + ASSERT_OK(conn.StartTransaction(IsolationLevel::SNAPSHOT_ISOLATION)); + // Establish a distributed transaction by obtaining a lock on some key outside of the contended + // range 
of keys. + ASSERT_OK(conn.FetchFormat("SELECT * FROM foo WHERE k=$0 FOR UPDATE", kNumKeys * 2 + i)); + LOG(INFO) << "Conn " << i << " started"; } TestThreadHolder thread_holder; @@ -1301,20 +1307,14 @@ void PgWaitQueuesTest::TestMultiTabletFairness() const { // in *serial* in both RR and RC isolation. for (int i = 0; i < kNumUpdateConns; ++i) { auto& conn = update_conns.at(i); - ASSERT_OK(conn.StartTransaction(IsolationLevel::SNAPSHOT_ISOLATION)); - // Establish a distributed transaction by obtaining a lock on some key outside of the contended - // range of keys. - ASSERT_OK(conn.FetchFormat("SELECT * FROM foo WHERE k=$0 FOR UPDATE", kNumKeys * 2 + i)); - LOG(INFO) << "Conn " << i << " started"; update_did_return[i] = false; - thread_holder.AddThreadFunctor( [i, &conn, &update_did_return = update_did_return[i], &queued_waiters, &update_query] { // Wait for all connections to queue their thread of execution auto txn_id = ASSERT_RESULT(conn.FetchRow("SELECT yb_get_current_transaction()")); LOG(INFO) << "Conn " << i << " queued with txn id " << yb::ToString(txn_id); queued_waiters.CountDown(); - ASSERT_TRUE(queued_waiters.WaitFor(10s * kTimeMultiplier)); + ASSERT_TRUE(queued_waiters.WaitFor(20s * kTimeMultiplier)); LOG(INFO) << "Conn " << i << " finished waiting"; // Set timeout to 10s so the test does not hang for default 600s timeout in case of failure. From 09b77028f06f9438faea30e214c99e1115326ef7 Mon Sep 17 00:00:00 2001 From: Bvsk Patnaik Date: Mon, 16 Sep 2024 11:57:16 -0700 Subject: [PATCH 41/75] [#23940] YSQL: Replace copyright string from YugaByteDB to YugabyteDB Summary: When running arc lint on a file that has ``` // Copyright (c) YugaByteDB, Inc. ``` the linter fails with an error ``` arc lint Exception Undefined offset: -1 (Run with `--trace` for a full exception trace.) ``` The error message is not informative since this is an array index out of bounds exception within the linter. This makes it difficult for devs to root cause the issue. 
This revision replaces the copyright string from the old version `YugaByteDB` to the correct version `YugabyteDB`. Jira: DB-12838 Test Plan: Jenkins Reviewers: amartsinchyk Reviewed By: amartsinchyk Subscribers: ybase, yql Differential Revision: https://phorge.dev.yugabyte.com/D38073 --- java/yb-client/src/test/java/org/yb/util/TestStringUtil.java | 2 +- .../test/java/org/yb/pgsql/TestPgRegressDistinctPushdown.java | 2 +- src/odyssey/sources/yb_auth_passthrough.c | 2 +- src/odyssey/sources/yb_auth_passthrough.h | 2 +- src/odyssey/sources/yb_oid_entry.c | 2 +- src/postgres/src/backend/optimizer/path/yb_uniqkeys.c | 4 ++-- src/postgres/src/backend/utils/misc/yb_ysql_conn_mgr_helper.c | 2 +- src/postgres/src/include/yb_ysql_conn_mgr_helper.h | 2 +- src/yb/ash/CMakeLists.txt | 2 +- src/yb/util/lw_function.h | 2 +- src/yb/vector/CMakeLists.txt | 2 +- src/yb/yql/pggate/pg_doc_metrics.cc | 2 +- src/yb/yql/pggate/pg_doc_metrics.h | 2 +- src/yb/yql/pggate/pg_function.cc | 2 +- src/yb/yql/pggate/pg_function.h | 2 +- src/yb/yql/pggate/pggate.h | 2 +- 16 files changed, 17 insertions(+), 17 deletions(-) diff --git a/java/yb-client/src/test/java/org/yb/util/TestStringUtil.java b/java/yb-client/src/test/java/org/yb/util/TestStringUtil.java index 595f38422163..27a1ebe08109 100644 --- a/java/yb-client/src/test/java/org/yb/util/TestStringUtil.java +++ b/java/yb-client/src/test/java/org/yb/util/TestStringUtil.java @@ -1,4 +1,4 @@ -// Copyright (c) YugaByteDB, Inc. +// Copyright (c) YugabyteDB, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except // in compliance with the License. 
You may obtain a copy of the License at diff --git a/java/yb-pgsql/src/test/java/org/yb/pgsql/TestPgRegressDistinctPushdown.java b/java/yb-pgsql/src/test/java/org/yb/pgsql/TestPgRegressDistinctPushdown.java index cea7b93aaea9..0405a554a835 100644 --- a/java/yb-pgsql/src/test/java/org/yb/pgsql/TestPgRegressDistinctPushdown.java +++ b/java/yb-pgsql/src/test/java/org/yb/pgsql/TestPgRegressDistinctPushdown.java @@ -1,4 +1,4 @@ -// Copyright (c) YugaByteDB, Inc. +// Copyright (c) YugabyteDB, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at diff --git a/src/odyssey/sources/yb_auth_passthrough.c b/src/odyssey/sources/yb_auth_passthrough.c index aa0741e35250..206c9de5aa36 100644 --- a/src/odyssey/sources/yb_auth_passthrough.c +++ b/src/odyssey/sources/yb_auth_passthrough.c @@ -4,7 +4,7 @@ * Utilities for Ysql Connection Manager/Yugabyte (Postgres layer) integration * that have to be defined on the Ysql Connection Manager (Odyssey) side. * - * Copyright (c) YugaByteDB, Inc. + * Copyright (c) YugabyteDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy diff --git a/src/odyssey/sources/yb_auth_passthrough.h b/src/odyssey/sources/yb_auth_passthrough.h index db981d436d89..4f658bb314c3 100644 --- a/src/odyssey/sources/yb_auth_passthrough.h +++ b/src/odyssey/sources/yb_auth_passthrough.h @@ -4,7 +4,7 @@ * Utilities for Ysql Connection Manager/Yugabyte (Postgres layer) integration * that have to be defined on the Ysql Connection Manager (Odyssey) side. * - * Copyright (c) YugaByteDB, Inc. + * Copyright (c) YugabyteDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. 
You may obtain a copy diff --git a/src/odyssey/sources/yb_oid_entry.c b/src/odyssey/sources/yb_oid_entry.c index 264a56759056..d0e2d84ef235 100644 --- a/src/odyssey/sources/yb_oid_entry.c +++ b/src/odyssey/sources/yb_oid_entry.c @@ -4,7 +4,7 @@ * Utilities for Ysql Connection Manager/Yugabyte (Postgres layer) integration * that have to be defined on the Ysql Connection Manager (Odyssey) side. * - * Copyright (c) YugaByteDB, Inc. + * Copyright (c) YugabyteDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy diff --git a/src/postgres/src/backend/optimizer/path/yb_uniqkeys.c b/src/postgres/src/backend/optimizer/path/yb_uniqkeys.c index eddd7b110cbd..7d0ab6f9da76 100644 --- a/src/postgres/src/backend/optimizer/path/yb_uniqkeys.c +++ b/src/postgres/src/backend/optimizer/path/yb_uniqkeys.c @@ -1,9 +1,9 @@ /*-------------------------------------------------------------------------------------------------- * * yb_uniqkeys.c - * YugaByteDB distinct pushdown API + * YugabyteDB distinct pushdown API * - * Copyright (c) YugaByteDB, Inc. + * Copyright (c) YugabyteDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except * in compliance with the License. You may obtain a copy of the License at diff --git a/src/postgres/src/backend/utils/misc/yb_ysql_conn_mgr_helper.c b/src/postgres/src/backend/utils/misc/yb_ysql_conn_mgr_helper.c index 3c0c1a7b3539..c72d260e3896 100644 --- a/src/postgres/src/backend/utils/misc/yb_ysql_conn_mgr_helper.c +++ b/src/postgres/src/backend/utils/misc/yb_ysql_conn_mgr_helper.c @@ -4,7 +4,7 @@ * Utilities for Ysql Connection Manager/Yugabyte (Postgres layer) integration * that have to be defined on the PostgreSQL side. * - * Copyright (c) YugaByteDB, Inc. + * Copyright (c) YugabyteDB, Inc. 
* * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy diff --git a/src/postgres/src/include/yb_ysql_conn_mgr_helper.h b/src/postgres/src/include/yb_ysql_conn_mgr_helper.h index f59b00749c29..2660df4c4d13 100644 --- a/src/postgres/src/include/yb_ysql_conn_mgr_helper.h +++ b/src/postgres/src/include/yb_ysql_conn_mgr_helper.h @@ -4,7 +4,7 @@ * Utilities for Ysql Connection Manager/Yugabyte (Postgres layer) integration * that have to be defined on the PostgreSQL side. * - * Copyright (c) YugaByteDB, Inc. + * Copyright (c) YugabyteDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy diff --git a/src/yb/ash/CMakeLists.txt b/src/yb/ash/CMakeLists.txt index 6bcdc554745d..345bdbd02d80 100644 --- a/src/yb/ash/CMakeLists.txt +++ b/src/yb/ash/CMakeLists.txt @@ -1,4 +1,4 @@ -# Copyright (c) YugaByteDB, Inc. +# Copyright (c) YugabyteDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except # in compliance with the License. You may obtain a copy of the License at diff --git a/src/yb/util/lw_function.h b/src/yb/util/lw_function.h index 7126e14d9283..ac9020241311 100644 --- a/src/yb/util/lw_function.h +++ b/src/yb/util/lw_function.h @@ -1,4 +1,4 @@ -// Copyright (c) YugaByteDB, Inc. +// Copyright (c) YugabyteDB, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at diff --git a/src/yb/vector/CMakeLists.txt b/src/yb/vector/CMakeLists.txt index 400be2a4481f..589d32d6bb83 100644 --- a/src/yb/vector/CMakeLists.txt +++ b/src/yb/vector/CMakeLists.txt @@ -1,4 +1,4 @@ -# Copyright (c) YugaByteDB, Inc. +# Copyright (c) YugabyteDB, Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except # in compliance with the License. You may obtain a copy of the License at diff --git a/src/yb/yql/pggate/pg_doc_metrics.cc b/src/yb/yql/pggate/pg_doc_metrics.cc index 8f9424ca9538..3d9d40c06e35 100644 --- a/src/yb/yql/pggate/pg_doc_metrics.cc +++ b/src/yb/yql/pggate/pg_doc_metrics.cc @@ -1,5 +1,5 @@ //-------------------------------------------------------------------------------------------------- -// Copyright (c) YugaByteDB, Inc. +// Copyright (c) YugabyteDB, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at diff --git a/src/yb/yql/pggate/pg_doc_metrics.h b/src/yb/yql/pggate/pg_doc_metrics.h index 999aeb9cc3b7..8ee6d7a7462b 100644 --- a/src/yb/yql/pggate/pg_doc_metrics.h +++ b/src/yb/yql/pggate/pg_doc_metrics.h @@ -1,5 +1,5 @@ //-------------------------------------------------------------------------------------------------- -// Copyright (c) YugaByteDB, Inc. +// Copyright (c) YugabyteDB, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at diff --git a/src/yb/yql/pggate/pg_function.cc b/src/yb/yql/pggate/pg_function.cc index f1af54bd7eb3..d0f340f8cc6a 100644 --- a/src/yb/yql/pggate/pg_function.cc +++ b/src/yb/yql/pggate/pg_function.cc @@ -1,5 +1,5 @@ //-------------------------------------------------------------------------------------------------- -// Copyright (c) YugaByteDB, Inc. +// Copyright (c) YugabyteDB, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except // in compliance with the License. 
You may obtain a copy of the License at diff --git a/src/yb/yql/pggate/pg_function.h b/src/yb/yql/pggate/pg_function.h index 28a8b0484213..3121d2686bdd 100644 --- a/src/yb/yql/pggate/pg_function.h +++ b/src/yb/yql/pggate/pg_function.h @@ -1,5 +1,5 @@ //-------------------------------------------------------------------------------------------------- -// Copyright (c) YugaByteDB, Inc. +// Copyright (c) YugabyteDB, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at diff --git a/src/yb/yql/pggate/pggate.h b/src/yb/yql/pggate/pggate.h index 5b998cb80f3c..209500d2f1d7 100644 --- a/src/yb/yql/pggate/pggate.h +++ b/src/yb/yql/pggate/pggate.h @@ -1,4 +1,4 @@ -// Copyright (c) YugaByteDB, Inc. +// Copyright (c) YugabyteDB, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at From add83ef259cb33b904c817145e163ab9b9cf0b0b Mon Sep 17 00:00:00 2001 From: Nikhil Chandrappa Date: Mon, 16 Sep 2024 18:40:56 +0000 Subject: [PATCH 42/75] [#23947] Update callhome URL to use https Summary: Changing the callhome URL to use https. 
Test Plan: Manual Test Reviewers: djiang Reviewed By: djiang Subscribers: yugabyted-dev Differential Revision: https://phorge.dev.yugabyte.com/D38089 --- bin/yugabyted | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/yugabyted b/bin/yugabyted index bd58bb306579..648a3dea0ce3 100755 --- a/bin/yugabyted +++ b/bin/yugabyted @@ -5633,7 +5633,7 @@ class ControlScript(object): def callhome(self): if self.configs.saved_data.get("callhome"): try: - url = "http://diagnostics.yugabyte.com" + url = "https://diagnostics.yugabyte.com" headers = { "Content-Type": "application/json", "User-Agent": "Mozilla", From 69db717feb2a0c7d8686a7b1fb66bec4e0de7ab2 Mon Sep 17 00:00:00 2001 From: Premkumar Date: Mon, 16 Sep 2024 14:02:26 -0700 Subject: [PATCH 43/75] Update faq page (#23704) * update faq page * fix a typo * add preview/stable * edits and fixes * changing to design goals style format * format * making link shortcode take named params * format * edits to the faq page * add trade-offs * fixes from code review * format --------- Co-authored-by: Dwight Hodge Co-authored-by: aishwarya24 --- .../Yugabyte/spelling-exceptions.txt | 1 + .../preview/architecture/design-goals.md | 12 +- .../preview/architecture/key-concepts.md | 48 +-- .../contribute/docs/widgets-and-shortcodes.md | 6 +- docs/content/preview/faq/general.md | 305 ++++++------------ .../data-migration/migrate-from-postgres.md | 8 +- .../stable/architecture/design-goals.md | 12 +- .../stable/architecture/key-concepts.md | 48 +-- .../data-migration/migrate-from-postgres.md | 8 +- docs/layouts/shortcodes/link.html | 10 +- docs/layouts/shortcodes/release.html | 8 + 11 files changed, 182 insertions(+), 284 deletions(-) diff --git a/.github/vale-styles/Yugabyte/spelling-exceptions.txt b/.github/vale-styles/Yugabyte/spelling-exceptions.txt index 593e5d940f07..81fa77244429 100644 --- a/.github/vale-styles/Yugabyte/spelling-exceptions.txt +++ b/.github/vale-styles/Yugabyte/spelling-exceptions.txt @@ -321,6 
+321,7 @@ Javafuzz JavaScript Jenkins Jenkinsfile +Jepsen Jira jq jQuery diff --git a/docs/content/preview/architecture/design-goals.md b/docs/content/preview/architecture/design-goals.md index 2477b434d6e0..bb36ecbacb81 100644 --- a/docs/content/preview/architecture/design-goals.md +++ b/docs/content/preview/architecture/design-goals.md @@ -17,15 +17,15 @@ type: docs ## Scalability -YugabyteDB scales out horizontally by adding more nodes to handle increasing data volumes and higher workloads. With YugabyteDB, you can also opt for vertical scaling choosing more powerful infrastructure components. {{}} +YugabyteDB scales out horizontally by adding more nodes to handle increasing data volumes and higher workloads. With YugabyteDB, you can also opt for vertical scaling choosing more powerful infrastructure components. {{}} ## High Availability -YugabyteDB ensures continuous availability, even in the face of individual node failures or network partitions. YugabyteDB achieves this by replicating data across multiple nodes and implementing failover mechanisms via leader election. {{}} +YugabyteDB ensures continuous availability, even in the face of individual node failures or network partitions. YugabyteDB achieves this by replicating data across multiple nodes and implementing failover mechanisms via leader election. {{}} ## Fault Tolerance -YugabyteDB is resilient to various types of failures, such as node crashes, network partitions, disk failures, and other hardware or software faults and failure of various fault domains. It can automatically recover from these failures without data loss or corruption. {{}} +YugabyteDB is resilient to various types of failures, such as node crashes, network partitions, disk failures, and other hardware or software faults and failure of various fault domains. It can automatically recover from these failures without data loss or corruption. 
{{}} ## Consistency @@ -66,7 +66,7 @@ YugabyteDB monitors and automatically re-balances the number of tablet leaders a ## Data locality -YugabyteDB supports colocated tables and databases which enables related data to be kept together on the same node for performance reasons. {{}} +YugabyteDB supports colocated tables and databases which enables related data to be kept together on the same node for performance reasons. {{}} ## Security @@ -113,7 +113,7 @@ In addition: ## Cassandra compatibility -[YCQL](../../api/ycql/) is a [semi-relational CQL API](../../explore/ycql-language/) that is best suited for internet-scale OLTP and HTAP applications needing massive write scalability and fast queries. YCQL supports distributed transactions, strongly-consistent secondary indexes, and a native JSON column type. YCQL has its roots in the Cassandra Query Language. {{}} +[YCQL](../../api/ycql/) is a [semi-relational CQL API](../../explore/ycql-language/) that is best suited for internet-scale OLTP and HTAP applications needing massive write scalability and fast queries. YCQL supports distributed transactions, strongly-consistent secondary indexes, and a native JSON column type. YCQL has its roots in the Cassandra Query Language. {{}} ## Performance @@ -143,7 +143,7 @@ YugabyteDB has been designed with several cloud-native principles in mind. ## Kubernetes-ready -YugabyteDB works natively in Kubernetes and other containerized environments as a stateful application. {{}} +YugabyteDB works natively in Kubernetes and other containerized environments as a stateful application. {{}} ## Open source diff --git a/docs/content/preview/architecture/key-concepts.md b/docs/content/preview/architecture/key-concepts.md index 04d6328f24fc..503d22a16e2c 100644 --- a/docs/content/preview/architecture/key-concepts.md +++ b/docs/content/preview/architecture/key-concepts.md @@ -31,7 +31,7 @@ YugabyteDB provides ACID guarantees for all [transactions](#transaction). 
## CDC - Change data capture -CDC is a software design pattern used in database systems to capture and propagate data changes from one database to another in real-time or near real-time. YugabyteDB supports transactional CDC guaranteeing changes across tables are captured together. This enables use cases like real-time analytics, data warehousing, operational data replication, and event-driven architectures. {{}} +CDC is a software design pattern used in database systems to capture and propagate data changes from one database to another in real-time or near real-time. YugabyteDB supports transactional CDC guaranteeing changes across tables are captured together. This enables use cases like real-time analytics, data warehousing, operational data replication, and event-driven architectures. {{}} ## Cluster @@ -43,11 +43,11 @@ Sometimes the term *cluster* is used interchangeably with the term *universe*. H ## DocDB -DocDB is the underlying document storage engine of YugabyteDB and is built on top of a highly customized and optimized verison of [RocksDB](http://rocksdb.org/). {{}} +DocDB is the underlying document storage engine of YugabyteDB and is built on top of a highly customized and optimized verison of [RocksDB](http://rocksdb.org/). {{}} ## Fault domain -A fault domain is a potential point of failure. Examples of fault domains would be nodes, racks, zones, or entire regions. {{}} +A fault domain is a potential point of failure. Examples of fault domains would be nodes, racks, zones, or entire regions. {{}} ## Fault tolerance @@ -59,15 +59,15 @@ The fault tolerance determines how resilient the cluster is to domain (that is, Normally, only the [tablet leader](#tablet-leader) can process user-facing write and read requests. Follower reads allow you to lower read latencies by serving reads from the tablet followers. This is similar to reading from a cache, which can provide more read IOPS with low latency. 
The data might be slightly stale, but is timeline-consistent, meaning no out of order data is possible. -Follower reads are particularly beneficial in applications that can tolerate staleness. For instance, in a social media application where a post gets a million likes continuously, slightly stale reads are acceptable, and immediate updates are not necessary because the absolute number may not really matter to the end-user reading the post. In such cases, a slightly older value from the closest replica can achieve improved performance with lower latency. Follower reads are required when reading from [read replicas](#read-replica-cluster). {{}} +Follower reads are particularly beneficial in applications that can tolerate staleness. For instance, in a social media application where a post gets a million likes continuously, slightly stale reads are acceptable, and immediate updates are not necessary because the absolute number may not really matter to the end-user reading the post. In such cases, a slightly older value from the closest replica can achieve improved performance with lower latency. Follower reads are required when reading from [read replicas](#read-replica-cluster). {{}} ## Hybrid time -Hybrid time/timestamp is a monotonically increasing timestamp derived using [Hybrid Logical clock](../transactions/transactions-overview/#hybrid-logical-clocks). Multiple aspects of YugabyteDB's transaction model are based on hybrid time. {{}} +Hybrid time/timestamp is a monotonically increasing timestamp derived using [Hybrid Logical clock](../transactions/transactions-overview/#hybrid-logical-clocks). Multiple aspects of YugabyteDB's transaction model are based on hybrid time. {{}} ## Isolation levels -[Transaction](#transaction) isolation levels define the degree to which transactions are isolated from each other. Isolation levels determine how changes made by one transaction become visible to other concurrent transactions. 
{{}} +[Transaction](#transaction) isolation levels define the degree to which transactions are isolated from each other. Isolation levels determine how changes made by one transaction become visible to other concurrent transactions. {{}} {{}} YugabyteDB offers 3 isolation levels - [Serializable](../../explore/transactions/isolation-levels/#serializable-isolation), [Snapshot](../../explore/transactions/isolation-levels/#snapshot-isolation) and [Read committed](../../explore/transactions/isolation-levels/#read-committed-isolation) - in the {{}} API and one isolation level - [Snapshot](../../develop/learn/transactions/acid-transactions-ycql/) - in the {{}} API. @@ -79,11 +79,11 @@ YugabyteDB tries to keep the number of leaders evenly distributed across the [no ## Leader election -Amongst the [tablet](#tablet) replicas, one tablet is elected [leader](#tablet-leader) as per the [Raft](../docdb-replication/raft) protocol. {{}} +Amongst the [tablet](#tablet) replicas, one tablet is elected [leader](#tablet-leader) as per the [Raft](../docdb-replication/raft) protocol. {{}} ## Master server -The [YB-Master](../yb-master/) service is responsible for keeping system metadata, coordinating system-wide operations, such as creating, altering, and dropping tables, as well as initiating maintenance operations such as load balancing. {{}} +The [YB-Master](../yb-master/) service is responsible for keeping system metadata, coordinating system-wide operations, such as creating, altering, and dropping tables, as well as initiating maintenance operations such as load balancing. {{}} {{}} The master server is also typically referred as just **master**. @@ -91,7 +91,7 @@ The master server is also typically referred as just **master**. ## MVCC -MVCC stands for Multi-version Concurrency Control. It is a concurrency control method used by YugabyteDB to provide access to data in a way that allows concurrent queries and updates without causing conflicts. 
{{}} +MVCC stands for Multi-version Concurrency Control. It is a concurrency control method used by YugabyteDB to provide access to data in a way that allows concurrent queries and updates without causing conflicts. {{}} ## Namespace @@ -123,7 +123,7 @@ Designating one region as preferred can reduce the number of network hops needed Regardless of the preferred region setting, data is replicated across all the regions in the cluster to ensure region-level fault tolerance. -You can enable [follower reads](#follower-reads) to serve reads from non-preferred regions. In cases where the cluster has [read replicas](#read-replica-cluster) and a client connects to a read replica, reads are served from the replica; writes continue to be handled by the preferred region. {{}} +You can enable [follower reads](#follower-reads) to serve reads from non-preferred regions. In cases where the cluster has [read replicas](#read-replica-cluster) and a client connects to a read replica, reads are served from the replica; writes continue to be handled by the preferred region. {{}} ## Primary cluster @@ -131,17 +131,17 @@ A primary cluster can perform both writes and reads, unlike a [read replica clus ## Raft -Raft stands for Replication for availability and fault tolerance. This is the algorithm that YugabyteDB uses for replication guaranteeing consistency. {{}} +Raft stands for Replication for availability and fault tolerance. This is the algorithm that YugabyteDB uses for replication guaranteeing consistency. {{}} ## Read replica cluster Read replica clusters are optional clusters that can be set up in conjunction with a [primary cluster](#primary-cluster) to perform only reads; writes sent to read replica clusters get automatically rerouted to the primary cluster of the [universe](#universe). These clusters enable reads in regions that are far away from the primary cluster with timeline-consistent data. This ensures low latency reads for geo-distributed applications. 
-Data is brought into the read replica clusters through asynchronous replication from the primary cluster. In other words, [nodes](#node) in a read replica cluster act as Raft observers that do not participate in the write path involving the Raft leader and Raft followers present in the primary cluster. Reading from read replicas requires enabling [follower reads](#follower-reads). {{}} +Data is brought into the read replica clusters through asynchronous replication from the primary cluster. In other words, [nodes](#node) in a read replica cluster act as Raft observers that do not participate in the write path involving the Raft leader and Raft followers present in the primary cluster. Reading from read replicas requires enabling [follower reads](#follower-reads). {{}} ## Rebalancing -Rebalancing is the process of keeping an even distribution of tablets across the [nodes](#node) in a cluster. {{}} +Rebalancing is the process of keeping an even distribution of tablets across the [nodes](#node) in a cluster. {{}} ## Region @@ -151,24 +151,24 @@ A region refers to a defined geographical area or location where a cloud provide The number of copies of data in a YugabyteDB universe. YugabyteDB replicates data across [fault domains](#fault-domain) (for example, zones) in order to tolerate faults. [Fault tolerance](#fault-tolerance) (FT) and RF are correlated. To achieve a FT of k nodes, the universe has to be configured with a RF of (2k + 1). -The RF should be an odd number to ensure majority consensus can be established during failures. {{}} +The RF should be an odd number to ensure majority consensus can be established during failures. {{}} Each [read replica](#read-replica-cluster) cluster can also have its own replication factor. In this case, the replication factor determines how many copies of your primary data the read replica has; multiple copies ensure the availability of the replica in case of a node outage. 
Replicas *do not* participate in the primary cluster Raft consensus, and do not affect the fault tolerance of the primary cluster or contribute to failover. ## Sharding -Sharding is the process of mapping a table row to a [tablet](#tablet). YugabyteDB supports 2 types of sharding, Hash and Range. {{}} +Sharding is the process of mapping a table row to a [tablet](#tablet). YugabyteDB supports 2 types of sharding, Hash and Range. {{}} ## Smart driver A smart driver in the context of YugabyteDB is essentially a PostgreSQL driver with additional "smart" features that leverage the distributed nature of YugabyteDB. These smart drivers intelligently distribute application connections across the nodes and regions of a YugabyteDB cluster, eliminating the need for external load balancers. This results in balanced connections that provide lower latencies and prevent hot nodes. For geographically-distributed applications, the driver can seamlessly connect to the geographically nearest regions and availability zones for lower latency. Smart drivers are optimized for use with a distributed SQL database, and are both cluster-aware and topology-aware. They keep track of the members of the cluster as well as their locations. As nodes are added or removed from clusters, the driver updates its membership and topology information. The drivers read the database cluster topology from the metadata table, and route new connections to individual instance endpoints without relying on high-level cluster endpoints. The smart drivers are also capable of load balancing read-only connections across the available YB-TServers. -. {{}} +. {{}} ## Tablet -YugabyteDB splits a table into multiple small pieces called tablets for data distribution. The word "tablet" finds its origins in ancient history, when civilizations utilized flat slabs made of clay or stone as surfaces for writing and maintaining records. 
{{}} +YugabyteDB splits a table into multiple small pieces called tablets for data distribution. The word "tablet" finds its origins in ancient history, when civilizations utilized flat slabs made of clay or stone as surfaces for writing and maintaining records. {{}} {{}} Tablets are also referred as shards. @@ -184,15 +184,15 @@ In a cluster, each [tablet](#tablet) is replicated as per the [replication facto ## Tablet splitting -When a tablet reaches a threshold size, it splits into 2 new [tablets](#tablet). This is a very quick operation. {{}} +When a tablet reaches a threshold size, it splits into 2 new [tablets](#tablet). This is a very quick operation. {{}} ## Transaction -A transaction is a sequence of operations performed as a single logical unit of work. YugabyteDB provides [ACID](#acid) guarantees for transactions. {{}} +A transaction is a sequence of operations performed as a single logical unit of work. YugabyteDB provides [ACID](#acid) guarantees for transactions. {{}} ## TServer -The [YB-TServer](../yb-tserver) service is responsible for maintaining and managing table data in the form of tablets, as well as dealing with all the queries. {{}} +The [YB-TServer](../yb-tserver) service is responsible for maintaining and managing table data in the form of tablets, as well as dealing with all the queries. {{}} ## Universe @@ -204,19 +204,19 @@ Sometimes the terms *universe* and *cluster* are used interchangeably. The two a ## xCluster -xCluster is a type of deployment where data is replicated asynchronously between two [universes](#universe) - a primary and a standby. The standby can be used for disaster recovery. YugabyteDB supports transactional xCluster {{}}. +xCluster is a type of deployment where data is replicated asynchronously between two [universes](#universe) - a primary and a standby. The standby can be used for disaster recovery. YugabyteDB supports transactional xCluster {{}}. 
## YCQL -Semi-relational SQL API that is best fit for internet-scale OLTP and HTAP apps needing massive write scalability as well as blazing-fast queries. It supports distributed transactions, strongly consistent secondary indexes, and a native JSON column type. YCQL has its roots in the Cassandra Query Language. {{}} +Semi-relational SQL API that is best fit for internet-scale OLTP and HTAP apps needing massive write scalability as well as blazing-fast queries. It supports distributed transactions, strongly consistent secondary indexes, and a native JSON column type. YCQL has its roots in the Cassandra Query Language. {{}} ## YQL -The YugabyteDB Query Layer (YQL) is the primary layer that provides interfaces for applications to interact with using client drivers. This layer deals with the API-specific aspects such as query/command compilation and the run-time (data type representations, built-in operations, and more). {{}} +The YugabyteDB Query Layer (YQL) is the primary layer that provides interfaces for applications to interact with using client drivers. This layer deals with the API-specific aspects such as query/command compilation and the run-time (data type representations, built-in operations, and more). {{}} ## YSQL -Fully-relational SQL API that is wire compatible with the SQL language in PostgreSQL. It is best fit for RDBMS workloads that need horizontal write scalability and global data distribution while also using relational modeling features such as JOINs, distributed transactions, and referential integrity (such as foreign keys). Note that YSQL reuses the native query layer of the PostgreSQL open source project. {{}} +Fully-relational SQL API that is wire compatible with the SQL language in PostgreSQL. It is best fit for RDBMS workloads that need horizontal write scalability and global data distribution while also using relational modeling features such as JOINs, distributed transactions, and referential integrity (such as foreign keys). 
Note that YSQL reuses the native query layer of the PostgreSQL open source project. {{}} ## Zone diff --git a/docs/content/preview/contribute/docs/widgets-and-shortcodes.md b/docs/content/preview/contribute/docs/widgets-and-shortcodes.md index 2399da8d9e28..3a44001391f7 100644 --- a/docs/content/preview/contribute/docs/widgets-and-shortcodes.md +++ b/docs/content/preview/contribute/docs/widgets-and-shortcodes.md @@ -84,9 +84,9 @@ This is a warning with a [link](https://www.yugabyte.com). You can add a link to a url with an icon using the `link` shortcode which takes url as a string argument. Internal and external links will have different icons. You can use the `:version` variable to expand to all versions. -- {{}} : _External link_ `{{}}` -- {{}} : _Relative internal link_ `{{}}` -- {{}} : _Full path internal link_ `{{}}` +- {{}} : _External link_ `{{}}` +- {{}} : _Relative internal link_ `{{}}` +- {{}} : _Full path internal link_ `{{}}` ## Tables diff --git a/docs/content/preview/faq/general.md b/docs/content/preview/faq/general.md index 77d1f803e9ba..2c405c8b3b53 100644 --- a/docs/content/preview/faq/general.md +++ b/docs/content/preview/faq/general.md @@ -19,310 +19,193 @@ rightNav: hideH4: true --- -### Contents - -##### YugabyteDB - -- [What is YugabyteDB?](#what-is-yugabytedb) -- [What makes YugabyteDB unique?](#what-makes-yugabytedb-unique) -- [How many major releases YugabyteDB has had so far?](#how-many-major-releases-yugabytedb-has-had-so-far) -- [Is YugabyteDB open source?](#is-yugabytedb-open-source) -- [Can I deploy YugabyteDB to production?](#can-i-deploy-yugabytedb-to-production) -- [Which companies are currently using YugabyteDB in production?](#which-companies-are-currently-using-yugabytedb-in-production) -- [What is the definition of the "Beta" feature tag?](#what-is-the-definition-of-the-beta-feature-tag) -- [How do YugabyteDB, YugabyteDB Anywhere, and YugabyteDB Aeon differ from each 
other?](#how-do-yugabytedb-yugabytedb-anywhere-and-yugabytedb-aeon-differ-from-each-other) -- [How do I report a security vulnerability?](#how-do-i-report-a-security-vulnerability) - -##### Evaluating YugabyteDB - -- [What are the trade-offs involved in using YugabyteDB?](#what-are-the-trade-offs-involved-in-using-yugabytedb) -- [When is YugabyteDB a good fit?](#when-is-yugabytedb-a-good-fit) -- [When is YugabyteDB not a good fit?](#when-is-yugabytedb-not-a-good-fit) -- [Any performance benchmarks available?](#any-performance-benchmarks-available) -- [What about correctness testing?](#what-about-correctness-testing) -- [How does YugabyteDB compare to other SQL and NoSQL databases?](#how-does-yugabytedb-compare-to-other-sql-and-nosql-databases) - -##### Architecture - -- [How does YugabyteDB's common document store work?](#how-does-yugabytedb-s-common-document-store-work) -- [How can YugabyteDB be both CP and ensure high availability at the same time?](#how-can-yugabytedb-be-both-cp-and-ensure-high-availability-at-the-same-time) -- [Why is a group of YugabyteDB nodes called a universe instead of the more commonly used term clusters?](#why-is-a-group-of-yugabytedb-nodes-called-a-universe-instead-of-the-more-commonly-used-term-clusters) -- [Why is consistent hash sharding the default sharding strategy?](#why-is-consistent-hash-sharding-the-default-sharding-strategy) - ## YugabyteDB -### What is YugabyteDB? - - - -YugabyteDB is a high-performance distributed SQL database for powering global, internet-scale applications. Built using a unique combination of high-performance document store, per-shard distributed consensus replication and multi-shard ACID transactions (inspired by Google Spanner), YugabyteDB serves both scale-out RDBMS and internet-scale OLTP workloads with low query latency, extreme resilience against failures and global data distribution. 
As a cloud native database, it can be deployed across public and private clouds as well as in Kubernetes environments with ease. - -YugabyteDB is developed and distributed as an [Apache 2.0 open source project](https://github.com/yugabyte/yugabyte-db/). - -### What makes YugabyteDB unique? - -YugabyteDB is a transactional database that brings together four must-have needs of cloud native apps - namely SQL as a flexible query language, low-latency performance, continuous availability, and globally-distributed scalability. Other databases do not serve all 4 of these needs simultaneously. - -- Monolithic SQL databases offer SQL and low-latency reads, but neither have the ability to tolerate failures, nor can they scale writes across multiple nodes, zones, regions, and clouds. +### What is YugabyteDB -- Distributed NoSQL databases offer read performance, high availability, and write scalability, but give up on SQL features such as relational data modeling and ACID transactions. +YugabyteDB is a high-performance, highly available and scalable distributed SQL database designed for powering global, internet-scale applications. It is fully compatible with [PostgreSQL](https://www.postgresql.org/) and provides strong [ACID](/preview/architecture/key-concepts/#acid) guarantees for distributed transactions. It can be deployed in single-region, multi-region, and multi-cloud setups. -YugabyteDB feature highlights are listed below. +{{}} -#### SQL and ACID transactions +### What makes YugabyteDB unique -- SQL [JOINs](../../quick-start/explore/ysql/#join) and [distributed transactions](../../explore/transactions/distributed-transactions-ysql/) that allow multi-row access across any number of shards at any scale. 
It is [horizontally scalable](/preview/explore/linear-scalability/), supports global geo-distribution, supports [SQL (YSQL)](/preview/explore/ysql-language-features/sql-feature-support/) and [NoSQL (YCQL)](/preview/explore/ycql-language/) APIs, is [highly performant](/preview/benchmark/) and guarantees strong transactional consistency. -- Transactional [document store](../../architecture/docdb/) backed by self-healing, strongly-consistent, synchronous [replication](../../architecture/docdb-replication/replication/). +{{}} -#### High performance and massive scalability - -- Low latency for geo-distributed applications with multiple [read consistency levels](../../architecture/docdb-replication/replication/#follower-reads) and [read replicas](../../architecture/docdb-replication/read-replicas/). - -- Linearly scalable throughput for ingesting and serving ever-growing datasets. - -#### Global data consistency - -- [Global data distribution](../../explore/multi-region-deployments/) that brings consistent data close to users through multi-region and multi-cloud deployments. Optional two-region multi-master and master-follower configurations powered by CDC-driven asynchronous replication. - -- [Auto-sharding and auto-rebalancing](../../architecture/docdb-sharding/sharding/) to ensure uniform load across all nodes even for very large clusters. - -#### Cloud native - -- Built for the container era with [highly elastic scaling](../../explore/linear-scalability/) and infrastructure portability, including [Kubernetes-driven orchestration](../../quick-start/kubernetes/). +### Is YugabyteDB open source? -- [Self-healing database](../../explore/fault-tolerance/) that automatically tolerates any failures common in the inherently unreliable modern cloud infrastructure. +YugabyteDB is 100% open source. It is licensed under Apache 2.0. 
-#### Open source +{{}} -- Fully functional distributed database available under [Apache 2.0 open source license](https://github.com/yugabyte/yugabyte-db/). +### How many major releases YugabyteDB has had so far? -#### Built-in enterprise features +YugabyteDB released its first beta, [v0.9](https://www.yugabyte.com/blog/yugabyte-has-arrived/) in November 2017. Since then, several stable and preview versions have been released. The current stable version is {{}}, and the current preview version is {{}}. -- Starting in [v1.3](https://www.yugabyte.com/blog/announcing-yugabyte-db-v1-3-with-enterprise-features-as-open-source/), YugabyteDB is the only open-source distributed SQL database to have built-in enterprise features such as Distributed Backups, Data Encryption, and Read Replicas. New features such as [Change Data Capture (CDC)](../../architecture/docdb-replication/change-data-capture/) and [2 Data Center Deployments](../../architecture/docdb-replication/async-replication/) are also included in open source. +{{}} -### How many major releases YugabyteDB has had so far? +### What is the difference between preview and stable versions? -YugabyteDB has had the following major (stable) releases: - -- [v2.20](https://www.yugabyte.com/blog/release-220-announcement/) in November 2023 -- [v2.18](https://www.yugabyte.com/blog/release-218-announcement/) in May 2023 -- [v2.16](https://www.yugabyte.com/blog/yugabytedb-216/) in December 2022 -- [v2.14](https://www.yugabyte.com/blog/announcing-yugabytedb-2-14-higher-performance-and-security/) in July 2022. -- [v2.12](https://www.yugabyte.com/blog/announcing-yugabytedb-2-12/) in February 2022. (There was no v2.10 release.) -- [v2.8](https://www.yugabyte.com/blog/announcing-yugabytedb-2-8/) in November 2021. -- [v2.6](https://www.yugabyte.com/blog/announcing-yugabytedb-2-6/) in July 2021. -- [v2.4](https://www.yugabyte.com/blog/announcing-yugabytedb-2-4/) in January 2021. 
-- [v2.2](https://www.yugabyte.com/blog/announcing-yugabytedb-2-2-distributed-sql-made-easy/) in July 2020. -- [v2.1](https://www.yugabyte.com/blog/yugabytedb-2-1-is-ga-scaling-new-heights-with-distributed-sql/) in February 2020. -- [v2.0](https://www.yugabyte.com/blog/announcing-yugabyte-db-2-0-ga:-jepsen-tested,-high-performance-distributed-sql/) in September 2019. -- [v1.3](https://www.yugabyte.com/blog/announcing-yugabyte-db-v1-3-with-enterprise-features-as-open-source/) in July 2019. -- [v1.2](https://www.yugabyte.com/blog/announcing-yugabyte-db-1-2-company-update-jepsen-distributed-sql/) in March 2019. -- [v1.1](https://www.yugabyte.com/blog/announcing-yugabyte-db-1-1-and-company-update/) in September 2018. -- [v1.0](https://www.yugabyte.com/blog/announcing-yugabyte-db-1-0) in May 2018. -- [v0.9 Beta](https://www.yugabyte.com/blog/yugabyte-has-arrived/) in November 2017. - -Releases, including upcoming releases, are outlined on the [Releases Overview](/preview/releases/) page. The roadmap for this release can be found on [GitHub](https://github.com/yugabyte/yugabyte-db#whats-being-worked-on). +Preview releases include features under active development and are recommended for development and testing only. Stable releases undergo rigorous testing for a longer period of time and are ready for production use. -### Is YugabyteDB open source? +{{}} -Starting with [v1.3](https://www.yugabyte.com/blog/announcing-yugabyte-db-v1-3-with-enterprise-features-as-open-source/), YugabyteDB is 100% open source. It is licensed under Apache 2.0 and the source is available on [GitHub](https://github.com/yugabyte/yugabyte-db). +### What are the upcoming features? -### Can I deploy YugabyteDB to production? +The roadmap for upcoming releases and the list of recently released features can be found in the [yugabyte-db](https://github.com/yugabyte/yugabyte-db) repository on GitHub. -Yes, both YugabyteDB APIs are production ready. 
[YCQL](https://www.yugabyte.com/blog/yugabyte-db-1-0-a-peek-under-the-hood/) achieved this status starting with v1.0 in May 2018 while [YSQL](https://www.yugabyte.com/blog/announcing-yugabyte-db-2-0-ga:-jepsen-tested,-high-performance-distributed-sql/) became production ready starting v2.0 in September 2019. +{{}} ### Which companies are currently using YugabyteDB in production? -Reference deployments are listed in [Success Stories](https://www.yugabyte.com/success-stories/). - -### What is the definition of the "Beta" feature tag? - -Some features are marked Beta in every release. Following are the points to consider: +Global organizations of all sizes leverage YugabyteDB to fulfill their application requirements. -- Code is well tested. Enabling the feature is considered safe. Some of these features enabled by default. +{{}} -- Support for the overall feature will not be dropped, though details may change in incompatible ways in a subsequent beta or GA release. +### How do I report a security vulnerability? -- Recommended only for non-production use. +Follow the steps in the [vulnerability disclosure policy](../../secure/vulnerability-disclosure-policy) to report a vulnerability to our security team. The policy outlines our commitments to you when you disclose a potential vulnerability, the reporting process, and how we will respond. -Please do try our beta features and give feedback on them on our [Slack community]({{}}) or by filing a [GitHub issue](https://github.com/yugabyte/yugabyte-db/issues). +### What are YugabyteDB Anywhere and YugabyteDB Aeon? -### How do YugabyteDB, YugabyteDB Anywhere, and YugabyteDB Aeon differ from each other? +**[YugabyteDB](../../)** is the 100% open source core database. It is the best choice for startup organizations with strong technical operations expertise looking to deploy to production with traditional DevOps tools. -[YugabyteDB](../../) is the 100% open source core database. 
It is the best choice for the startup organizations with strong technical operations expertise looking to deploy to production with traditional DevOps tools. +**[YugabyteDB Anywhere](../../yugabyte-platform/)** is commercial software for running a self-managed YugabyteDB-as-a-Service. It has built-in cloud native operations, enterprise-grade deployment options, and world-class support. -[YugabyteDB Anywhere](../../yugabyte-platform/) is commercial software for running a self-managed YugabyteDB-as-a-Service. It has built-in cloud native operations, enterprise-grade deployment options, and world-class support. It is the simplest way to run YugabyteDB in mission-critical production environments with one or more regions (across both public cloud and on-premises data centers). +**[YugabyteDB Aeon](../../yugabyte-cloud/)** is a fully-managed cloud service on Amazon Web Services (AWS), Microsoft Azure, and Google Cloud Platform (GCP). [Sign up](https://cloud.yugabyte.com/) to get started. -[YugabyteDB Aeon](../../yugabyte-cloud/) is Yugabyte's fully-managed cloud service on Amazon Web Services (AWS), Microsoft Azure, and Google Cloud Platform (GCP). [Sign up](https://cloud.yugabyte.com/) to get started. +{{}} -For a more detailed comparison between the above, see [Compare Deployment Options](https://www.yugabyte.com/compare-products/). +### When is YugabyteDB a good fit? -### How do I report a security vulnerability? +YugabyteDB is a good fit for fast-growing, cloud-native applications that need to serve business-critical data reliably, with zero data loss, high availability, and low latency. Common use cases include: -Please follow the steps in the [vulnerability disclosure policy](../../secure/vulnerability-disclosure-policy) to report a vulnerability to our security team. The policy outlines our commitments to you when you disclose a potential vulnerability, the reporting process, and how we will respond. 
+- Distributed Online Transaction Processing (OLTP) applications needing multi-region scalability without compromising strong consistency and low latency. For example, user identity, Retail product catalog, Financial data service. -## Evaluating YugabyteDB +- Hybrid Transactional/Analytical Processing (HTAP) (also known as Translytical) applications needing real-time analytics on transactional data. For example, user personalization, fraud detection, machine learning. -### What are the trade-offs involved in using YugabyteDB? +- Streaming applications needing to efficiently ingest, analyze, and store ever-growing data. For example, IoT sensor analytics, time series metrics, real-time monitoring. -Trade-offs depend on the type of database used as baseline for comparison. +### When is YugabyteDB not a good fit? -#### Distributed SQL +YugabyteDB is not a good fit for traditional Online Analytical Processing (OLAP) use cases that need complete ad-hoc analytics. Use an OLAP store such as [Druid](http://druid.io/druid.html) or a data warehouse such as [Snowflake](https://www.snowflake.net/). -Examples: Amazon Aurora, Google Cloud Spanner, CockroachDB, TiDB +### What are the trade-offs of using YugabyteDB? -**Benefits of YugabyteDB** +Ensuring [ACID](../../architecture/key-concepts/#acid) transactions and full compatibility with the PostgreSQL API presents challenges in a distributed environment. The trade-offs can also vary depending on the database you're comparing it to. Here are a few key considerations when switching to YugabyteDB: -- Low-latency reads and high-throughput writes. -- Cloud-neutral deployments with a Kubernetes-native database. -- 100% Apache 2.0 open source even for enterprise features. +- **Consistency vs. Latency**: YugabyteDB uses the [Raft](../../architecture/docdb-replication/raft) consensus algorithm for strong consistency in distributed systems. 
While this guarantees data integrity, it can result in higher write latency compared to eventually consistent databases like Cassandra. -**Trade-offs** +- **Increased Query Latency**: Transactions and JOINs that span multiple nodes experience inter-node latency, making queries slower than in single-node databases like PostgreSQL. -- None + {{}} + [Many projects](https://github.com/yugabyte/yugabyte-db?tab=readme-ov-file#current-roadmap) are currently in progress to match the performance of a single-node database. + {{}} -Learn more: [What is Distributed SQL?](https://www.yugabyte.com/tech/distributed-sql/) +- **Cross-Region Latency**: In multi-region or globally distributed setups, YugabyteDB replicates data across regions to ensure availability and resilience. However, this can lead to higher write latency due to cross-region coordination. -#### Monolithic SQL +- **Resource Requirements**: Being a distributed database, YugabyteDB demands more hardware and networking resources to maintain high availability and fault tolerance compared to traditional monolithic databases that run on a single machine. -Examples: PostgreSQL, MySQL, Oracle, Amazon Aurora. +- **PostgreSQL Feature Support**: Every new PostgreSQL feature must be optimized for distributed environments, which is not a simple task. Be sure to verify that the PostgreSQL features your application relies on are supported in the current version of YugabyteDB. -**Benefits of YugabyteDB** +{{}} -- Scale write throughput linearly across multiple nodes and/or geographic regions. -- Automatic failover and native repair. -- 100% Apache 2.0 open source even for enterprise features. 
+### What is a YugabyteDB universe -**Trade-offs** +A YugabyteDB [universe](/preview/architecture/key-concepts/#universe) comprises one [primary cluster](/preview/architecture/key-concepts/#primary-cluster) and zero or more [read replica clusters](/preview/architecture/key-concepts/#read-replica-cluster) that collectively function as a resilient and scalable distributed database. It is common to have just a primary cluster and hence the terms cluster and universe are sometimes used interchangeably but it is worthwhile to note that they are different. -- Transactions and JOINs can now span multiple nodes, thereby increasing latency. +{{}} -Learn more: [Distributed PostgreSQL on a Google Spanner Architecture – Query Layer](https://www.yugabyte.com/blog/distributed-postgresql-on-a-google-spanner-architecture-query-layer/) +### Are there any performance benchmarks available? -#### Traditional NewSQL +YugabyteDB is regularly benchmarked using a variety of standard benchmarks like [TPC-C](/preview/benchmark/tpcc/), [YCSB](/preview/benchmark/ycsb-ysql/), and [sysbench](/preview/benchmark/sysbench-ysql/). -Examples: Vitess, Citus +{{}} -**Benefits of YugabyteDB** +### How is YugabyteDB tested for correctness? -- Distributed transactions across any number of nodes. -- No single point of failure given all nodes are equal. -- 100% Apache 2.0 open source even for enterprise features. +Apart from the rigorous failure testing, YugabyteDB passes most of the scenarios in [Jepsen](https://jepsen.io/) testing. Jepsen is a methodology and toolset used to verify the correctness of distributed systems, particularly in the context of consistency models and fault tolerance and has become a standard for stress-testing distributed databases, data stores, and other distributed systems. -**Trade-offs** +{{}} -- None +### How does YugabyteDB compare to other databases? 
-Learn more: [Rise of Globally Distributed SQL Databases – Redefining Transactional Stores for Cloud Native Era](https://www.yugabyte.com/blog/rise-of-globally-distributed-sql-databases-redefining-transactional-stores-for-cloud-native-era/) +We have published detailed comparison information against multiple SQL and NoSQL databases: -#### Transactional NoSQL +- **SQL** - [CockroachDB](../comparisons/cockroachdb/), [TiDB](../comparisons/tidb/), [Vitess](../comparisons/vitess/), [Amazon Aurora](../comparisons/amazon-aurora/), [Google Spanner](../comparisons/google-spanner/) +- **NoSQL** - [MongoDB](../comparisons/mongodb/), [FoundationDB](../comparisons/foundationdb/), [Cassandra](../comparisons/cassandra/), [DynamoDB](../comparisons/amazon-dynamodb/), [CosmosDB](../comparisons/azure-cosmos/) -Examples: MongoDB, Amazon DynamoDB, FoundationDB, Azure Cosmos DB. +{{}} -**Benefits of YugabyteDB** +## PostgreSQL support -- Flexibility of SQL as query needs change in response to business changes. -- Distributed transactions across any number of nodes. -- Low latency, strongly consistent reads given that read-time quorum is avoided altogether. -- 100% Apache 2.0 open source even for enterprise features. +### How compatible is YugabyteDB with PostgreSQL? -**Trade-offs** +YugabyteDB is [wire-protocol, syntax, feature, and runtime](https://www.yugabyte.com/postgresql/postgresql-compatibility/) compatible with PostgreSQL. That said, supporting all PostgreSQL features in a distributed system is not always feasible. -- None +{{}} -Learn more: [Why are NoSQL Databases Becoming Transactional?](https://www.yugabyte.com/blog/nosql-databases-becoming-transactional-mongodb-dynamodb-faunadb-cosmosdb/) +### Can I use my existing PostgreSQL tools and drivers with YugabyteDB? -#### Eventually Consistent NoSQL +Yes. YugabyteDB is [fully compatible](#how-compatible-is-yugabytedb-with-postgresql) with PostgreSQL and automatically works well with most PostgreSQL tools. 
-Examples: Apache Cassandra, Couchbase. +{{}} -**Benefits of YugabyteDB** +### Are PostgreSQL extensions supported? -- Flexibility of SQL as query needs change in response to business changes. -- Strongly consistent, zero data loss writes. -- Strongly consistent as well as timeline-consistent reads without resorting to eventual consistency-related penalties such as read repairs and anti-entropy. -- 100% Apache 2.0 open source even for enterprise features. +YugabyteDB pre-bundles many popular extensions and these should be readily available on your cluster. But given the distributed nature of YugabyteDB, not all extensions are supported by default. -**Trade-offs** +{{}} -- Extremely short unavailability during the leader election time for all shard leaders lost during a node failure or network partition. +### How can I migrate from PostgreSQL? -Learn more: [Apache Cassandra: The Truth Behind Tunable Consistency, Lightweight Transactions & Secondary Indexes](https://www.yugabyte.com/blog/apache-cassandra-lightweight-transactions-secondary-indexes-tunable-consistency/) +YugabyteDB is fully compatible with PostgreSQL and so most PostgreSQL applications should work as is. To address corner cases, we have published a [comprehensive guide](/preview/manage/data-migration/migrate-from-postgres/) to help you migrate from PostgreSQL. -### When is YugabyteDB a good fit? +{{}} -YugabyteDB is a good fit for fast-growing, cloud native applications that need to serve business-critical data reliably, with zero data loss, high availability, and low latency. Common use cases include: +## Architecture -- Distributed Online Transaction Processing (OLTP) applications needing multi-region scalability without compromising strong consistency and low latency. For example, user identity, Retail product catalog, Financial data service. +### How does YugabyteDB distribute data? 
-- Hybrid Transactional/Analytical Processing (HTAP), also known as Translytical, applications needing real-time analytics on transactional data. For example, user personalization, fraud detection, machine learning. +The table data is split into [tablets](/preview/architecture/key-concepts/#tablet) and the table rows are mapped to the tablets via [sharding](/preview/explore/linear-scalability/data-distribution/). The tablets themselves are distributed across the various nodes in the cluster. -- Streaming applications needing to efficiently ingest, analyze, and store ever-growing data. For example, IoT sensor analytics, time series metrics, real-time monitoring. +{{}} -See some success stories at [yugabyte.com](https://www.yugabyte.com/success-stories/). +### How does YugabyteDB scale? -### When is YugabyteDB not a good fit? +YugabyteDB scales seamlessly when new nodes are added to the cluster without any service disruption. Table data is [stored distributed](#how-does-yugabytedb-distribute-data) in tablets. When new nodes are added, the rebalancer moves certain tablets to other nodes and keeps the number of tablets on each node more or less the same. As data grows, these tablets also split into two and are moved to other nodes. -YugabyteDB is not a good fit for traditional Online Analytical Processing (OLAP) use cases that need complete ad-hoc analytics. Use an OLAP store such as [Druid](http://druid.io/druid.html) or a data warehouse such as [Snowflake](https://www.snowflake.net/). +{{}} -### Any performance benchmarks available? +### How does YugabyteDB provide high availability? -[Yahoo Cloud Serving Benchmark (YCSB)](https://github.com/brianfrankcooper/YCSB/wiki) is a popular benchmarking framework for NoSQL databases. We benchmarked the Yugabyte Cloud QL (YCQL) API against standard Apache Cassandra using YCSB. 
YugabyteDB outperformed Apache Cassandra by increasing margins as the number of keys (data density) increased across all the 6 YCSB workload configurations. +YugabyteDB replicates [tablet](/preview/architecture/key-concepts/#tablet) data onto [followers](/preview/architecture/key-concepts/#tablet-follower) of the tablet via [RAFT](/preview/architecture/docdb-replication/raft/) consensus. This ensures that a consistent copy of the data is available in case of failures. On failures, one of the tablet followers is promoted to be the [leader](/preview/architecture/key-concepts/#tablet-leader). -[Netflix Data Benchmark (NDBench)](https://github.com/Netflix/ndbench) is another publicly available, cloud-enabled benchmark tool for data store systems. We ran NDBench against YugabyteDB for 7 days and observed P99 and P995 latencies that were orders of magnitude less than that of Apache Cassandra. +{{}} -Details for both the above benchmarks are published in [Building a Strongly Consistent Cassandra with Better Performance](https://www.yugabyte.com/blog/building-a-strongly-consistent-cassandra-with-better-performance-aa96b1ab51d6). +### How is data consistency maintained across multiple nodes? -### What about correctness testing? +Every write (insert, update, delete) to the data is replicated via [RAFT](/preview/architecture/docdb-replication/raft/) consensus to [tablet followers](/preview/architecture/key-concepts/#tablet-follower) as per the [replication factor (RF)](/preview/architecture/key-concepts/#replication-factor-rf) of the cluster. Before acknowledging the write operation back to the client, YugabyteDB ensures that the data is replicated to a quorum (RF/2 + 1) of followers. -[Jepsen](https://jepsen.io/) is a widely used framework to evaluate the behavior of databases under different failure scenarios. 
It allows for a database to be run across multiple nodes, and create artificial failure scenarios, as well as verify the correctness of the system under these scenarios. YugabyteDB 1.2 passes [formal Jepsen testing](https://www.yugabyte.com/blog/yugabyte-db-1-2-passes-jepsen-testing/). +{{}} -### How does YugabyteDB compare to other SQL and NoSQL databases? +### What is tablet splitting? -See [Compare YugabyteDB to other databases](../comparisons/) +Data is stored in [tablets](/preview/architecture/key-concepts/#tablet). As the tablet grows, the tablet splits into two. This enables some data to be moved to other nodes in the cluster. -- [Amazon Aurora](../comparisons/amazon-aurora/) -- [Google Cloud Spanner](../comparisons/google-spanner/) -- [MongoDB](../comparisons/mongodb/) -- [CockroachDB](../comparisons/cockroachdb/) +{{}} -## Architecture +### Are indexes colocated with tables? -### How does YugabyteDB's common document store work? +Indexes are not typically colocated with the base table. The sharding of indexes is based on the primary key of the index and is independent of how the main table is sharded/distributed which is based on the primary key of the table. -[DocDB](../../architecture/docdb/), YugabyteDB's distributed document store is common across all APIs, and built using a custom integration of Raft replication, distributed ACID transactions, and the RocksDB storage engine. Specifically, DocDB enhances RocksDB by transforming it from a key-value store (with only primitive data types) to a document store (with complex data types). 
**Every key is stored as a separate document in DocDB, irrespective of the API responsible for managing the key.** DocDB's [sharding](../../architecture/docdb-sharding/sharding/), [replication/fault-tolerance](../../architecture/docdb-replication/replication/), and [distributed ACID transactions](../../architecture/transactions/distributed-txns/) architecture are all based on the [Google Spanner design](https://research.google.com/archive/spanner-osdi2012.pdf) first published in 2012. [How We Built a High Performance Document Store on RocksDB?](https://www.yugabyte.com/blog/how-we-built-a-high-performance-document-store-on-rocksdb/) provides an in-depth look into DocDB. +{{}} +
+{{}} ### How can YugabyteDB be both CP and ensure high availability at the same time? In terms of the [CAP theorem](https://www.yugabyte.com/blog/a-for-apple-b-for-ball-c-for-cap-theorem-8e9b78600e6d), YugabyteDB is a consistent and partition-tolerant (CP) database. It ensures high availability (HA) for most practical situations even while remaining strongly consistent. While this may seem to be a violation of the CAP theorem, that is not the case. CAP treats availability as a binary option whereas YugabyteDB treats availability as a percentage that can be tuned to achieve high write availability (reads are always available as long as a single node is available). -- During network partitions or node failures, the replicas of the impacted tablets (whose leaders got partitioned out or lost) form two groups: a majority partition that can still establish a Raft consensus and a minority partition that cannot establish such a consensus (given the lack of quorum). The replicas in the majority partition elect a new leader among themselves in a matter of seconds and are ready to accept new writes after the leader election completes. For these few seconds till the new leader is elected, the DB is unable to accept new writes given the design choice of prioritizing consistency over availability. All the leader replicas in the minority partition lose their leadership during these few seconds and hence become followers. - -- Majority partitions are available for both reads and writes. Minority partitions are not available for writes, but may serve stale reads (up to a staleness as configured by the [--max_stale_read_bound_time_ms](../../reference/configuration/yb-tserver/#max-stale-read-bound-time-ms) flag). **Multi-active availability** refers to YugabyteDB's ability to dynamically adjust to the state of the cluster and serve consistent writes at any replica in the majority partition. 
- -- The approach above obviates the need for any unpredictable background anti-entropy operations as well as need to establish quorum at read time. As shown in the [YCSB benchmarks against Apache Cassandra](https://forum.yugabyte.com/t/ycsb-benchmark-results-for-yugabyte-and-apache-cassandra-again-with-p99-latencies/99), YugabyteDB delivers predictable p99 latencies as well as 3x read throughput that is also timeline-consistent (given no quorum is needed at read time). - -On one hand, the YugabyteDB storage and replication architecture is similar to that of [Google Cloud Spanner](https://cloudplatform.googleblog.com/2017/02/inside-Cloud-Spanner-and-the-CAP-Theorem.html), which is also a CP database with high write availability. While Google Cloud Spanner leverages Google's proprietary network infrastructure, YugabyteDB is designed work on commodity infrastructure used by most enterprise users. On the other hand, YugabyteDB's multi-model, multi-API, and tunable read latency approach is similar to that of [Azure Cosmos DB](https://azure.microsoft.com/en-us/blog/a-technical-overview-of-azure-cosmos-db/). - -A post on our blog titled [Practical Tradeoffs in Google Cloud Spanner, Azure Cosmos DB and YugabyteDB](https://www.yugabyte.com/blog/practical-tradeoffs-in-google-cloud-spanner-azure-cosmos-db-and-yugabyte-db/) goes through the above tradeoffs in more detail. - -### Why is a group of YugabyteDB nodes called a universe instead of the more commonly used term clusters? - -A YugabyteDB universe packs a lot more functionality than what people think of when referring to a cluster. In fact, in certain deployment choices, the universe subsumes the equivalent of multiple clusters and some of the operational work needed to run them. 
Here are just a few concrete differences, which made us feel like giving it a different name would help earmark the differences and avoid confusion: - -- A YugabyteDB universe can move into new machines, availability zones (AZs), regions, and data centers in an online fashion, while these primitives are not associated with a traditional cluster. - -- You can set up multiple asynchronous replicas with just a few clicks (using YugabyteDB Anywhere). This is built into the universe as a first-class operation with bootstrapping of the remote replica and all the operational aspects of running asynchronous replicas being supported natively. In the case of traditional clusters, the source and the asynchronous replicas are independent clusters. The user is responsible for maintaining these separate clusters as well as operating the replication logic. - -- Failover to asynchronous replicas as the primary data and fallback once the original is up and running are both natively supported in a universe. - -### Why is consistent hash sharding the default sharding strategy? - -Users primarily turn to YugabyteDB for scalability reasons. Consistent hash sharding is ideal for massively scalable workloads because it distributes data evenly across all the nodes in the cluster, while retaining ease of adding nodes into the cluster. Most use cases that require scalability do not need to perform range lookups on the primary key, so consistent hash sharding is the default sharding strategy for YugabyteDB. Common applications that do not need hash sharding include user identity (user IDs do not need ordering), product catalog (product IDs are not related to one another), and stock ticker data (one stock symbol is independent of all other stock symbols). For applications that benefit from range sharding, YugabyteDB lets you select that option. 
- -To learn more about sharding strategies and lessons learned, see [Four Data Sharding Strategies We Analyzed in Building a Distributed SQL Database](https://www.yugabyte.com/blog/four-data-sharding-strategies-we-analyzed-in-building-a-distributed-sql-database/). +{{}} diff --git a/docs/content/preview/manage/data-migration/migrate-from-postgres.md b/docs/content/preview/manage/data-migration/migrate-from-postgres.md index 92f2f1fb69e9..d221187827dd 100644 --- a/docs/content/preview/manage/data-migration/migrate-from-postgres.md +++ b/docs/content/preview/manage/data-migration/migrate-from-postgres.md @@ -304,13 +304,13 @@ To learn more about the various useful metrics that can be monitored, see [Metri Because of the distributed nature of YugabyteDB, queries are executed quite differently from Postgres. This is because the latency across the nodes are taken into account by the query planner. Adopting the following practices will help improve the performance of your applications. -- **Single-row transactions**: YugabyteDB has optimizations to improve the performance of transactions in certain scenarios where transactions operate on a single row. Consider converting multi-statement transactions to single-statement ones to improve performace. {{}} +- **Single-row transactions**: YugabyteDB has optimizations to improve the performance of transactions in certain scenarios where transactions operate on a single row. Consider converting multi-statement transactions to single-statement ones to improve performance. {{}} -- **Use On Conflict clause**: Use the optional ON CONFLICT clause in the INSERT statement to circumvent certain errors and avoid multiple round trips. {{}} +- **Use On Conflict clause**: Use the optional ON CONFLICT clause in the INSERT statement to circumvent certain errors and avoid multiple round trips. {{}} -- **Set statement timeouts**: Avoid getting stuck in a wait loop because of starvation by using a reasonable timeout for the statements. 
{{}} +- **Set statement timeouts**: Avoid getting stuck in a wait loop because of starvation by using a reasonable timeout for the statements. {{}} -- **Stored procedures**: Use stored procedures to bundle a set of statements with error handling to be executed on the server and avoid multiple round trips. {{}} +- **Stored procedures**: Use stored procedures to bundle a set of statements with error handling to be executed on the server and avoid multiple round trips. {{}} {{}} For a full list of best practices to improve performance, see [Performance tuning in YSQL](../../../develop/learn/transactions/transactions-performance-ysql/) diff --git a/docs/content/stable/architecture/design-goals.md b/docs/content/stable/architecture/design-goals.md index 777b73cf809f..86e2bc3fd406 100644 --- a/docs/content/stable/architecture/design-goals.md +++ b/docs/content/stable/architecture/design-goals.md @@ -15,15 +15,15 @@ type: docs ## Scalability -YugabyteDB scales out horizontally by adding more nodes to handle increasing data volumes and higher workloads. With YugabyteDB, you can also opt for vertical scaling choosing more powerful infrastructure components. {{}} +YugabyteDB scales out horizontally by adding more nodes to handle increasing data volumes and higher workloads. With YugabyteDB, you can also opt for vertical scaling choosing more powerful infrastructure components. {{}} ## High Availability -YugabyteDB ensures continuous availability, even in the face of individual node failures or network partitions. YugabyteDB achieves this by replicating data across multiple nodes and implementing failover mechanisms via leader election. {{}} +YugabyteDB ensures continuous availability, even in the face of individual node failures or network partitions. YugabyteDB achieves this by replicating data across multiple nodes and implementing failover mechanisms via leader election. 
{{}} ## Fault Tolerance -YugabyteDB is resilient to various types of failures, such as node crashes, network partitions, disk failures, and other hardware or software faults and failure of various fault domains. It can automatically recover from these failures without data loss or corruption. {{}} +YugabyteDB is resilient to various types of failures, such as node crashes, network partitions, disk failures, and other hardware or software faults and failure of various fault domains. It can automatically recover from these failures without data loss or corruption. {{}} ## Consistency @@ -64,7 +64,7 @@ YugabyteDB monitors and automatically re-balances the number of tablet leaders a ## Data locality -YugabyteDB supports colocated tables and databases which enables related data to be kept together on the same node for performance reasons. {{}} +YugabyteDB supports colocated tables and databases which enables related data to be kept together on the same node for performance reasons. {{}} ## Security @@ -111,7 +111,7 @@ In addition: ## Cassandra compatibility -[YCQL](../../api/ycql/) is a [semi-relational CQL API](../../explore/ycql-language/) that is best suited for internet-scale OLTP and HTAP applications needing massive write scalability and fast queries. YCQL supports distributed transactions, strongly-consistent secondary indexes, and a native JSON column type. YCQL has its roots in the Cassandra Query Language. {{}} +[YCQL](../../api/ycql/) is a [semi-relational CQL API](../../explore/ycql-language/) that is best suited for internet-scale OLTP and HTAP applications needing massive write scalability and fast queries. YCQL supports distributed transactions, strongly-consistent secondary indexes, and a native JSON column type. YCQL has its roots in the Cassandra Query Language. {{}} ## Performance @@ -141,7 +141,7 @@ YugabyteDB has been designed with several cloud-native principles in mind. 
## Kubernetes-ready -YugabyteDB works natively in Kubernetes and other containerized environments as a stateful application. {{}} +YugabyteDB works natively in Kubernetes and other containerized environments as a stateful application. {{}} ## Open source diff --git a/docs/content/stable/architecture/key-concepts.md b/docs/content/stable/architecture/key-concepts.md index a3adb61ca725..7c8de26b75aa 100644 --- a/docs/content/stable/architecture/key-concepts.md +++ b/docs/content/stable/architecture/key-concepts.md @@ -26,7 +26,7 @@ YugabyteDB provides ACID guarantees for all [transactions](#transaction). ## CDC - Change data capture -CDC is a software design pattern used in database systems to capture and propagate data changes from one database to another in real-time or near real-time. YugabyteDB supports transactional CDC guaranteeing changes across tables are captured together. This enables use cases like real-time analytics, data warehousing, operational data replication, and event-driven architectures. {{}} +CDC is a software design pattern used in database systems to capture and propagate data changes from one database to another in real-time or near real-time. YugabyteDB supports transactional CDC guaranteeing changes across tables are captured together. This enables use cases like real-time analytics, data warehousing, operational data replication, and event-driven architectures. {{}} ## Cluster @@ -38,11 +38,11 @@ Sometimes the term *cluster* is used interchangeably with the term *universe*. H ## DocDB -DocDB is the underlying document storage engine of YugabyteDB and is built on top of a highly customized and optimized verison of [RocksDB](http://rocksdb.org/). {{}} +DocDB is the underlying document storage engine of YugabyteDB and is built on top of a highly customized and optimized version of [RocksDB](http://rocksdb.org/). {{}} ## Fault domain -A fault domain is a potential point of failure. 
Examples of fault domains would be nodes, racks, zones, or entire regions. {{}} +A fault domain is a potential point of failure. Examples of fault domains would be nodes, racks, zones, or entire regions. {{}} ## Fault tolerance @@ -54,15 +54,15 @@ The fault tolerance determines how resilient the cluster is to domain (that is, Normally, only the [tablet leader](#tablet-leader) can process user-facing write and read requests. Follower reads allow you to lower read latencies by serving reads from the tablet followers. This is similar to reading from a cache, which can provide more read IOPS with low latency. The data might be slightly stale, but is timeline-consistent, meaning no out of order data is possible. -Follower reads are particularly beneficial in applications that can tolerate staleness. For instance, in a social media application where a post gets a million likes continuously, slightly stale reads are acceptable, and immediate updates are not necessary because the absolute number may not really matter to the end-user reading the post. In such cases, a slightly older value from the closest replica can achieve improved performance with lower latency. Follower reads are required when reading from [read replicas](#read-replica-cluster). {{}} +Follower reads are particularly beneficial in applications that can tolerate staleness. For instance, in a social media application where a post gets a million likes continuously, slightly stale reads are acceptable, and immediate updates are not necessary because the absolute number may not really matter to the end-user reading the post. In such cases, a slightly older value from the closest replica can achieve improved performance with lower latency. Follower reads are required when reading from [read replicas](#read-replica-cluster). {{}} ## Hybrid time -Hybrid time/timestamp is a monotonically increasing timestamp derived using [Hybrid Logical clock](../transactions/transactions-overview/#hybrid-logical-clocks). 
Multiple aspects of YugabyteDB's transaction model are based on hybrid time. {{}} +Hybrid time/timestamp is a monotonically increasing timestamp derived using [Hybrid Logical clock](../transactions/transactions-overview/#hybrid-logical-clocks). Multiple aspects of YugabyteDB's transaction model are based on hybrid time. {{}} ## Isolation levels -[Transaction](#transaction) isolation levels define the degree to which transactions are isolated from each other. Isolation levels determine how changes made by one transaction become visible to other concurrent transactions. {{}} +[Transaction](#transaction) isolation levels define the degree to which transactions are isolated from each other. Isolation levels determine how changes made by one transaction become visible to other concurrent transactions. {{}} {{}} YugabyteDB offers 3 isolation levels - [Serializable](../../explore/transactions/isolation-levels/#serializable-isolation), [Snapshot](../../explore/transactions/isolation-levels/#snapshot-isolation) and [Read committed](../../explore/transactions/isolation-levels/#read-committed-isolation) - in the {{}} API and one isolation level - [Snapshot](../../develop/learn/transactions/acid-transactions-ycql/) - in the {{}} API. @@ -74,11 +74,11 @@ YugabyteDB tries to keep the number of leaders evenly distributed across the [no ## Leader election -Amongst the [tablet](#tablet) replicas, one tablet is elected [leader](#tablet-leader) as per the [Raft](../docdb-replication/raft) protocol. {{}} +Amongst the [tablet](#tablet) replicas, one tablet is elected [leader](#tablet-leader) as per the [Raft](../docdb-replication/raft) protocol. {{}} ## Master server -The [YB-Master](../yb-master/) service is responsible for keeping system metadata, coordinating system-wide operations, such as creating, altering, and dropping tables, as well as initiating maintenance operations such as load balancing. 
{{}} +The [YB-Master](../yb-master/) service is responsible for keeping system metadata, coordinating system-wide operations, such as creating, altering, and dropping tables, as well as initiating maintenance operations such as load balancing. {{}} {{}} The master server is also typically referred as just **master**. @@ -86,7 +86,7 @@ The master server is also typically referred as just **master**. ## MVCC -MVCC stands for Multi-version Concurrency Control. It is a concurrency control method used by YugabyteDB to provide access to data in a way that allows concurrent queries and updates without causing conflicts. {{}} +MVCC stands for Multi-version Concurrency Control. It is a concurrency control method used by YugabyteDB to provide access to data in a way that allows concurrent queries and updates without causing conflicts. {{}} ## Namespace @@ -118,7 +118,7 @@ Designating one region as preferred can reduce the number of network hops needed Regardless of the preferred region setting, data is replicated across all the regions in the cluster to ensure region-level fault tolerance. -You can enable [follower reads](#follower-reads) to serve reads from non-preferred regions. In cases where the cluster has [read replicas](#read-replica-cluster) and a client connects to a read replica, reads are served from the replica; writes continue to be handled by the preferred region. {{}} +You can enable [follower reads](#follower-reads) to serve reads from non-preferred regions. In cases where the cluster has [read replicas](#read-replica-cluster) and a client connects to a read replica, reads are served from the replica; writes continue to be handled by the preferred region. {{}} ## Primary cluster @@ -126,17 +126,17 @@ A primary cluster can perform both writes and reads, unlike a [read replica clus ## Raft -Raft stands for Replication for availability and fault tolerance. This is the algorithm that YugabyteDB uses for replication guaranteeing consistency. 
{{}} +Raft stands for Replication for availability and fault tolerance. This is the algorithm that YugabyteDB uses for replication guaranteeing consistency. {{}} ## Read replica cluster Read replica clusters are optional clusters that can be set up in conjunction with a [primary cluster](#primary-cluster) to perform only reads; writes sent to read replica clusters get automatically rerouted to the primary cluster of the [universe](#universe). These clusters enable reads in regions that are far away from the primary cluster with timeline-consistent data. This ensures low latency reads for geo-distributed applications. -Data is brought into the read replica clusters through asynchronous replication from the primary cluster. In other words, [nodes](#node) in a read replica cluster act as Raft observers that do not participate in the write path involving the Raft leader and Raft followers present in the primary cluster. Reading from read replicas requires enabling [follower reads](#follower-reads). {{}} +Data is brought into the read replica clusters through asynchronous replication from the primary cluster. In other words, [nodes](#node) in a read replica cluster act as Raft observers that do not participate in the write path involving the Raft leader and Raft followers present in the primary cluster. Reading from read replicas requires enabling [follower reads](#follower-reads). {{}} ## Rebalancing -Rebalancing is the process of keeping an even distribution of tablets across the [nodes](#node) in a cluster. {{}} +Rebalancing is the process of keeping an even distribution of tablets across the [nodes](#node) in a cluster. {{}} ## Region @@ -146,24 +146,24 @@ A region refers to a defined geographical area or location where a cloud provide The number of copies of data in a YugabyteDB universe. YugabyteDB replicates data across [fault domains](#fault-domain) (for example, zones) in order to tolerate faults. [Fault tolerance](#fault-tolerance) (FT) and RF are correlated. 
To achieve a FT of k nodes, the universe has to be configured with a RF of (2k + 1). -The RF should be an odd number to ensure majority consensus can be established during failures. {{}} +The RF should be an odd number to ensure majority consensus can be established during failures. {{}} Each [read replica](#read-replica-cluster) cluster can also have its own replication factor. In this case, the replication factor determines how many copies of your primary data the read replica has; multiple copies ensure the availability of the replica in case of a node outage. Replicas *do not* participate in the primary cluster Raft consensus, and do not affect the fault tolerance of the primary cluster or contribute to failover. ## Sharding -Sharding is the process of mapping a table row to a [tablet](#tablet). YugabyteDB supports 2 types of sharding, Hash and Range. {{}} +Sharding is the process of mapping a table row to a [tablet](#tablet). YugabyteDB supports 2 types of sharding, Hash and Range. {{}} ## Smart driver A smart driver in the context of YugabyteDB is essentially a PostgreSQL driver with additional "smart" features that leverage the distributed nature of YugabyteDB. These smart drivers intelligently distribute application connections across the nodes and regions of a YugabyteDB cluster, eliminating the need for external load balancers. This results in balanced connections that provide lower latencies and prevent hot nodes. For geographically-distributed applications, the driver can seamlessly connect to the geographically nearest regions and availability zones for lower latency. Smart drivers are optimized for use with a distributed SQL database, and are both cluster-aware and topology-aware. They keep track of the members of the cluster as well as their locations. As nodes are added or removed from clusters, the driver updates its membership and topology information. 
The drivers read the database cluster topology from the metadata table, and route new connections to individual instance endpoints without relying on high-level cluster endpoints. The smart drivers are also capable of load balancing read-only connections across the available YB-TServers. -. {{}} +. {{}} ## Tablet -YugabyteDB splits a table into multiple small pieces called tablets for data distribution. The word "tablet" finds its origins in ancient history, when civilizations utilized flat slabs made of clay or stone as surfaces for writing and maintaining records. {{}} +YugabyteDB splits a table into multiple small pieces called tablets for data distribution. The word "tablet" finds its origins in ancient history, when civilizations utilized flat slabs made of clay or stone as surfaces for writing and maintaining records. {{}} {{}} Tablets are also referred as shards. @@ -179,15 +179,15 @@ In a cluster, each [tablet](#tablet) is replicated as per the [replication facto ## Tablet splitting -When a tablet reaches a threshold size, it splits into 2 new [tablets](#tablet). This is a very quick operation. {{}} +When a tablet reaches a threshold size, it splits into 2 new [tablets](#tablet). This is a very quick operation. {{}} ## Transaction -A transaction is a sequence of operations performed as a single logical unit of work. YugabyteDB provides [ACID](#acid) guarantees for transactions. {{}} +A transaction is a sequence of operations performed as a single logical unit of work. YugabyteDB provides [ACID](#acid) guarantees for transactions. {{}} ## TServer -The [YB-TServer](../yb-tserver) service is responsible for maintaining and managing table data in the form of tablets, as well as dealing with all the queries. {{}} +The [YB-TServer](../yb-tserver) service is responsible for maintaining and managing table data in the form of tablets, as well as dealing with all the queries. 
{{}} ## Universe @@ -199,19 +199,19 @@ Sometimes the terms *universe* and *cluster* are used interchangeably. The two a ## xCluster -xCluster is a type of deployment where data is replicated asynchronously between two [universes](#universe) - a primary and a standby. The standby can be used for disaster recovery. YugabyteDB supports transactional xCluster {{}}. +xCluster is a type of deployment where data is replicated asynchronously between two [universes](#universe) - a primary and a standby. The standby can be used for disaster recovery. YugabyteDB supports transactional xCluster {{}}. ## YCQL -Semi-relational SQL API that is best fit for internet-scale OLTP and HTAP apps needing massive write scalability as well as blazing-fast queries. It supports distributed transactions, strongly consistent secondary indexes, and a native JSON column type. YCQL has its roots in the Cassandra Query Language. {{}} +Semi-relational SQL API that is best fit for internet-scale OLTP and HTAP apps needing massive write scalability as well as blazing-fast queries. It supports distributed transactions, strongly consistent secondary indexes, and a native JSON column type. YCQL has its roots in the Cassandra Query Language. {{}} ## YQL -The YugabyteDB Query Layer (YQL) is the primary layer that provides interfaces for applications to interact with using client drivers. This layer deals with the API-specific aspects such as query/command compilation and the run-time (data type representations, built-in operations, and more). {{}} +The YugabyteDB Query Layer (YQL) is the primary layer that provides interfaces for applications to interact with using client drivers. This layer deals with the API-specific aspects such as query/command compilation and the run-time (data type representations, built-in operations, and more). {{}} ## YSQL -Fully-relational SQL API that is wire compatible with the SQL language in PostgreSQL. 
It is best fit for RDBMS workloads that need horizontal write scalability and global data distribution while also using relational modeling features such as JOINs, distributed transactions, and referential integrity (such as foreign keys). Note that YSQL reuses the native query layer of the PostgreSQL open source project. {{}} +Fully-relational SQL API that is wire compatible with the SQL language in PostgreSQL. It is best fit for RDBMS workloads that need horizontal write scalability and global data distribution while also using relational modeling features such as JOINs, distributed transactions, and referential integrity (such as foreign keys). Note that YSQL reuses the native query layer of the PostgreSQL open source project. {{}} ## Zone diff --git a/docs/content/stable/manage/data-migration/migrate-from-postgres.md b/docs/content/stable/manage/data-migration/migrate-from-postgres.md index c812a7769c76..33fed0999619 100644 --- a/docs/content/stable/manage/data-migration/migrate-from-postgres.md +++ b/docs/content/stable/manage/data-migration/migrate-from-postgres.md @@ -300,13 +300,13 @@ Regularly monitor the target database to ensure it is performing efficiently. Th Because of the distributed nature of YugabyteDB, queries are executed quite differently from Postgres. This is because the latency across the nodes are taken into account by the query planner. Adopting the following practices will help improve the performance of your applications. -- **Single-row transactions**: YugabyteDB has optimizations to improve the performance of transactions in certain scenarios where transactions operate on a single row. Consider converting multi-statement transactions to single-statement ones to improve performace. {{}} +- **Single-row transactions**: YugabyteDB has optimizations to improve the performance of transactions in certain scenarios where transactions operate on a single row. 
Consider converting multi-statement transactions to single-statement ones to improve performance. {{}} -- **Use On Conflict clause**: Use the optional ON CONFLICT clause in the INSERT statement to circumvent certain errors and avoid multiple round trips. {{}} +- **Use On Conflict clause**: Use the optional ON CONFLICT clause in the INSERT statement to circumvent certain errors and avoid multiple round trips. {{}} -- **Set statement timeouts**: Avoid getting stuck in a wait loop because of starvation by using a reasonable timeout for the statements. {{}} +- **Set statement timeouts**: Avoid getting stuck in a wait loop because of starvation by using a reasonable timeout for the statements. {{}} -- **Stored procedures**: Use stored procedures to bundle a set of statements with error handling to be executed on the server and avoid multiple round trips. {{}} +- **Stored procedures**: Use stored procedures to bundle a set of statements with error handling to be executed on the server and avoid multiple round trips. {{}} {{}} For a full list of best practices to improve performance, see [Performance tuning in YSQL](../../../develop/learn/transactions/transactions-performance-ysql/) diff --git a/docs/layouts/shortcodes/link.html b/docs/layouts/shortcodes/link.html index 88ce3d03f28c..5bf27bdcaa12 100644 --- a/docs/layouts/shortcodes/link.html +++ b/docs/layouts/shortcodes/link.html @@ -1,5 +1,7 @@ {{/* */}} -{{- $url :=.Get 0 -}} +{{- $url :=.Get "dest" -}} +{{- $text := .Get "text" -}} +{{- $before := eq (.Get "icon-before") "true" -}} {{/* get the page path so determine depth and version */}} {{- $path := split $.Page.File.Dir "/" -}} {{/* version is the first path of the path.
eg: preview/stable/v2.12 ...*/}} @@ -10,5 +12,9 @@ {{- if and (strings.HasPrefix $url "http") (not (strings.HasPrefix $url "https://docs.yugabyte")) -}} {{- $icon = "fa-arrow-up-right-from-square" -}} {{- end -}} + + {{- if $before -}} {{ end -}} + {{if $text}}{{$text}} {{end}} - + {{- if not $before -}} {{- end -}} + diff --git a/docs/layouts/shortcodes/release.html b/docs/layouts/shortcodes/release.html index d1fdbc924a8f..e20c8bcd3fbc 100644 --- a/docs/layouts/shortcodes/release.html +++ b/docs/layouts/shortcodes/release.html @@ -6,6 +6,14 @@ {{- $numversions := len $versions -}} {{- $count := 0 -}} {{- range $version := $versions -}} + {{- if or (eq $version "preview") (eq $version "stable") -}} + {{- range page.Site.Data.currentVersions.dbVersions -}} + {{- if eq $version .alias -}} + {{- $version = .version -}} + {{- break -}} + {{- end -}} + {{- end -}} + {{- end -}} {{- /* retain the original text for display */ -}} {{- $orig := trim $version " " -}} {{- /* trim the spaces,+,.,x,X */ -}} From 063dbe5dc2b986c089c461dcfb6b471a607277f5 Mon Sep 17 00:00:00 2001 From: Minghui Yang Date: Mon, 9 Sep 2024 20:38:44 +0000 Subject: [PATCH 44/75] [#23786] YSQL: yb_make_next_ddl_statement_nonincrementing Summary: In a recent customer issue investigation, the customer performed an application upgrade, which involves running many DDLs (100+) consecutively. Many of these DDLs cause catalog version to increment. As a result, all the active connections kept doing catalog cache refreshes continuously that led to PG memory spike and latency spike. It will be beneficial to reduce the number of DDLs that increment the catalog version. 
After analyzing customer's DDL statements that have caused catalog version to increment, they can be classified as 3 categories: (1) create index statements that build indexes on a newly created table (2) create new partition statements and link them to the existing parent partition (3) create new table but referencing an existing table via foreign key For (1), because the table itself is newly created, there is no need for the create index statement to increment the catalog version. To make things worse, a create index statement by default runs concurrently, and increments the catalog version by 3. In this particular customer's situation, more than half of the DDLs belong to category (1). One solution is to combine create statement and the create index statement together. Using an example found online: ``` yugabyte=# create table foo ( yugabyte(# id serial primary key, yugabyte(# code integer, yugabyte(# label text); CREATE TABLE yugabyte=# create unique index foo_idx on foo using btree (code, label); NOTICE: index method "btree" was replaced with "lsm" in YugabyteDB CREATE INDEX yugabyte=# select * from pg_yb_catalog_version; db_oid | current_version | last_breaking_version --------+-----------------+----------------------- 1 | 1 | 1 13248 | 1 | 1 13249 | 1 | 1 13251 | 4 | 1 13252 | 1 | 1 (5 rows) ``` By create table and then create a unique index, the catalog version was incremented by 3. On the other hand, we can combine them into one create table statement: ``` yugabyte=# create table foo ( yugabyte(# id serial primary key, yugabyte(# code integer, yugabyte(# label text, yugabyte(# constraint foo_uq unique (code, label)); select *CREATE TABLE yugabyte=# select * from pg_yb_catalog_version; db_oid | current_version | last_breaking_version --------+-----------------+----------------------- 1 | 1 | 1 13248 | 1 | 1 13249 | 1 | 1 13251 | 1 | 1 13252 | 1 | 1 (5 rows) ``` We can see that there was no catalog version increment. 
However it is not always possible to do these rewrite. The create index statement does allow more options, such as specifying ASC, DESC which isn't allowed when declaring a uniqe constraint in the create table statement itself. It is in such a case we introduce the new GUC `yb_make_next_ddl_statement_nonincrementing` to help. This is an auto-reset gflag that is done similar to the existing GUC `yb_make_next_ddl_statement_nonbreaking`. When set to true, it will suppress the next DDL statement from incrementing both the `current_version` and the `last_breaking_version`. Note that in order for the GUC to work for create index statement, we need to use the nonconcurrent keyword. By default the create index statement runs concurrently and its algorithm involves incrementing the catalog version by 3 to work correctly. A create index noncurrently only bumps up the catalog version by 1. For a newly created table, because it is empty, it is safe and correct to run create index nonconcurrently. For example, ``` yugabyte=# create table foo(id int); CREATE TABLE yugabyte=# select * from pg_yb_catalog_version; db_oid | current_version | last_breaking_version --------+-----------------+----------------------- 1 | 1 | 1 13248 | 1 | 1 13249 | 1 | 1 13251 | 1 | 1 13252 | 1 | 1 (5 rows) yugabyte=# create index nonconcurrently id_idx on foo(id); CREATE INDEX yugabyte=# select * from pg_yb_catalog_version; db_oid | current_version | last_breaking_version --------+-----------------+----------------------- 1 | 1 | 1 13248 | 1 | 1 13249 | 1 | 1 13251 | 2 | 1 13252 | 1 | 1 (5 rows) ``` With the new GUC `yb_make_next_ddl_statement_nonincrementing`, the catalog version will stay the same: ``` yugabyte=# create table foo(id int); CREATE TABLE yugabyte=# select * from pg_yb_catalog_version; db_oid | current_version | last_breaking_version --------+-----------------+----------------------- 1 | 1 | 1 13248 | 1 | 1 13249 | 1 | 1 13251 | 1 | 1 13252 | 1 | 1 (5 rows) yugabyte=# set 
yb_make_next_ddl_statement_nonincrementing = true; SET yugabyte=# create index nonconcurrently id_idx on foo(id); CREATE INDEX yugabyte=# select * from pg_yb_catalog_version; db_oid | current_version | last_breaking_version --------+-----------------+----------------------- 1 | 1 | 1 13248 | 1 | 1 13249 | 1 | 1 13251 | 1 | 1 13252 | 1 | 1 (5 rows) ``` A new unit test is added. Jira: DB-12689 Test Plan: ./yb_build.sh --cxx-test pg_catalog_version-test --gtest_filter PgCatalogVersionTest.NonIncrementingDDLMode Reviewers: kfranz Reviewed By: kfranz Subscribers: svc_phabricator, yql Differential Revision: https://phorge.dev.yugabyte.com/D37915 --- src/postgres/src/backend/utils/misc/guc.c | 12 ++ .../src/backend/utils/misc/pg_yb_utils.c | 30 ++++- src/postgres/src/include/pg_yb_utils.h | 6 + .../yql/pgwrapper/pg_catalog_version-test.cc | 111 ++++++++++++++++++ 4 files changed, 156 insertions(+), 3 deletions(-) diff --git a/src/postgres/src/backend/utils/misc/guc.c b/src/postgres/src/backend/utils/misc/guc.c index b1b7383c7287..95323cebf7df 100644 --- a/src/postgres/src/backend/utils/misc/guc.c +++ b/src/postgres/src/backend/utils/misc/guc.c @@ -2450,6 +2450,18 @@ static struct config_bool ConfigureNamesBool[] = NULL, NULL, NULL }, + { + {"yb_make_next_ddl_statement_nonincrementing", PGC_SUSET, CUSTOM_OPTIONS, + gettext_noop("When set, the next ddl statement will not cause " + "catalog version to increment. 
This only affects " + "the next ddl statement and resets automatically."), + NULL + }, + &yb_make_next_ddl_statement_nonincrementing, + false, + NULL, NULL, NULL + }, + { {"yb_plpgsql_disable_prefetch_in_for_query", PGC_USERSET, QUERY_TUNING, gettext_noop("Disable prefetching in a PLPGSQL FOR loop over a query."), diff --git a/src/postgres/src/backend/utils/misc/pg_yb_utils.c b/src/postgres/src/backend/utils/misc/pg_yb_utils.c index b3547855cebb..168158e5d905 100644 --- a/src/postgres/src/backend/utils/misc/pg_yb_utils.c +++ b/src/postgres/src/backend/utils/misc/pg_yb_utils.c @@ -1376,6 +1376,7 @@ bool yb_enable_index_aggregate_pushdown = true; bool yb_enable_optimizer_statistics = false; bool yb_bypass_cond_recheck = true; bool yb_make_next_ddl_statement_nonbreaking = false; +bool yb_make_next_ddl_statement_nonincrementing = false; bool yb_plpgsql_disable_prefetch_in_for_query = false; bool yb_enable_sequence_pushdown = true; bool yb_disable_wait_for_backends_catalog_version = false; @@ -1555,7 +1556,7 @@ MergeCatalogModificationAspects( } static void -YBResetEnableNonBreakingDDLMode() +YBResetEnableSpecialDDLMode() { /* * Reset yb_make_next_ddl_statement_nonbreaking to avoid its further side @@ -1567,6 +1568,17 @@ YBResetEnableNonBreakingDDLMode() if (YbIsClientYsqlConnMgr() && yb_make_next_ddl_statement_nonbreaking) YbSendParameterStatusForConnectionManager("yb_make_next_ddl_statement_nonbreaking", "false"); yb_make_next_ddl_statement_nonbreaking = false; + + /* + * Reset yb_make_next_ddl_statement_nonincrementing to avoid its further side + * effect that may not be intended. + * + * Also, reset Connection Manager cache if the value was cached to begin + * with. 
+ */ + if (YbIsClientYsqlConnMgr() && yb_make_next_ddl_statement_nonincrementing) + YbSendParameterStatusForConnectionManager("yb_make_next_ddl_statement_nonincrementing", "false"); + yb_make_next_ddl_statement_nonincrementing = false; } /* @@ -1610,7 +1622,7 @@ YBResetDdlState() status = YbMemCtxReset(ddl_transaction_state.mem_context); } ddl_transaction_state = (struct DdlTransactionState){0}; - YBResetEnableNonBreakingDDLMode(); + YBResetEnableSpecialDDLMode(); HandleYBStatus(YBCPgClearSeparateDdlTxnMode()); HandleYBStatus(status); } @@ -1684,7 +1696,7 @@ YBDecrementDdlNestingLevel() if (GetCurrentMemoryContext() == ddl_transaction_state.mem_context) MemoryContextSwitchTo(ddl_transaction_state.mem_context->parent); - YBResetEnableNonBreakingDDLMode(); + YBResetEnableSpecialDDLMode(); bool increment_done = false; bool is_silent_altering = false; if (has_write) @@ -2241,6 +2253,18 @@ YbDdlModeOptional YbGetDdlMode( */ if (yb_make_next_ddl_statement_nonbreaking) is_breaking_change = false; + /* + * If yb_make_next_ddl_statement_nonincrementing is true, then no DDL statement + * will cause a catalog version to increment. Note that we also disable breaking + * catalog change as well because it does not make sense to only increment + * breaking catalog version. + */ + if (yb_make_next_ddl_statement_nonincrementing) + { + is_version_increment = false; + is_breaking_change = false; + } + is_altering_existing_data |= is_version_increment; diff --git a/src/postgres/src/include/pg_yb_utils.h b/src/postgres/src/include/pg_yb_utils.h index c7d53142f5da..2766c684ed1a 100644 --- a/src/postgres/src/include/pg_yb_utils.h +++ b/src/postgres/src/include/pg_yb_utils.h @@ -489,6 +489,12 @@ extern bool yb_bypass_cond_recheck; */ extern bool yb_make_next_ddl_statement_nonbreaking; +/* + * Enables nonincrementing DDL mode in which a DDL statement is considered as a + * "same version DDL" and therefore will not cause catalog version to increment.
+ */ +extern bool yb_make_next_ddl_statement_nonincrementing; + /* * Allows capability to disable prefetching in a PLPGSQL FOR loop over a query. * This is introduced for some test(s) with lazy evaluation in READ COMMITTED diff --git a/src/yb/yql/pgwrapper/pg_catalog_version-test.cc b/src/yb/yql/pgwrapper/pg_catalog_version-test.cc index 5acd6babe0a0..e809828af032 100644 --- a/src/yb/yql/pgwrapper/pg_catalog_version-test.cc +++ b/src/yb/yql/pgwrapper/pg_catalog_version-test.cc @@ -49,6 +49,13 @@ class PgCatalogVersionTest : public LibPqTestBase { "--allowed_preview_flags_csv=ysql_enable_db_catalog_version_mode"); } + Result GetCatalogVersion(PGConn* conn) { + const auto db_oid = VERIFY_RESULT(conn->FetchRow(Format( + "SELECT oid FROM pg_database WHERE datname = '$0'", PQdb(conn->get())))); + return conn->FetchRow( + Format("SELECT current_version FROM pg_yb_catalog_version where db_oid = $0", db_oid)); + } + // Prepare the table pg_yb_catalog_version according to 'per_database_mode': // * if 'per_database_mode' is true, we prepare table pg_yb_catalog_version // for per-database catalog version mode by updating the table to have one @@ -1525,6 +1532,110 @@ TEST_F(PgCatalogVersionTest, NonBreakingDDLMode) { ASSERT_OK(conn1.Execute("ABORT")); } +TEST_F(PgCatalogVersionTest, NonIncrementingDDLMode) { + const string kDatabaseName = "yugabyte"; + + auto conn = ASSERT_RESULT(ConnectToDB(kDatabaseName)); + ASSERT_OK(conn.Execute("CREATE TABLE t1(a int)")); + auto version = ASSERT_RESULT(GetCatalogVersion(&conn)); + + // REVOKE bumps up the catalog version by 1. + ASSERT_OK(conn.Execute("REVOKE SELECT ON t1 FROM public")); + auto new_version = ASSERT_RESULT(GetCatalogVersion(&conn)); + ASSERT_EQ(new_version, version + 1); + version = new_version; + + // GRANT bumps up the catalog version by 1. 
+ ASSERT_OK(conn.Execute("GRANT SELECT ON t1 TO public")); + new_version = ASSERT_RESULT(GetCatalogVersion(&conn)); + ASSERT_EQ(new_version, version + 1); + version = new_version; + + ASSERT_OK(conn.Execute("CREATE INDEX idx1 ON t1(a)")); + new_version = ASSERT_RESULT(GetCatalogVersion(&conn)); + // By default CREATE INDEX runs concurrently and its algorithm requires to bump up catalog + // version 3 times. + ASSERT_EQ(new_version, version + 3); + version = new_version; + + // CREATE INDEX NONCONCURRENTLY bumps up catalog version by 1. + ASSERT_OK(conn.Execute("CREATE INDEX NONCONCURRENTLY idx2 ON t1(a)")); + new_version = ASSERT_RESULT(GetCatalogVersion(&conn)); + ASSERT_EQ(new_version, version + 1); + version = new_version; + + // Let's start over, but this time use yb_make_next_ddl_statement_nonincrementing to suppress + // incrementing catalog version. + ASSERT_OK(conn.Execute("SET yb_make_next_ddl_statement_nonincrementing TO TRUE")); + ASSERT_OK(conn.Execute("REVOKE SELECT ON t1 FROM public")); + new_version = ASSERT_RESULT(GetCatalogVersion(&conn)); + ASSERT_EQ(new_version, version); + + ASSERT_OK(conn.Execute("SET yb_make_next_ddl_statement_nonincrementing TO TRUE")); + ASSERT_OK(conn.Execute("GRANT SELECT ON t1 TO public")); + new_version = ASSERT_RESULT(GetCatalogVersion(&conn)); + ASSERT_EQ(new_version, version); + + ASSERT_OK(conn.Execute("SET yb_make_next_ddl_statement_nonincrementing TO TRUE")); + ASSERT_OK(conn.Execute("CREATE INDEX idx3 ON t1(a)")); + new_version = ASSERT_RESULT(GetCatalogVersion(&conn)); + // By default CREATE INDEX runs concurrently and its algorithm requires to bump up catalog + // version 3 times, only the first bump is suppressed.
+ ASSERT_EQ(new_version, version + 2); + version = new_version; + + ASSERT_OK(conn.Execute("SET yb_make_next_ddl_statement_nonincrementing TO TRUE")); + ASSERT_OK(conn.Execute("CREATE INDEX NONCONCURRENTLY idx4 ON t1(a)")); + new_version = ASSERT_RESULT(GetCatalogVersion(&conn)); + ASSERT_EQ(new_version, version); + + // Verify that the session variable yb_make_next_ddl_statement_nonincrementing auto-resets to false. + ASSERT_OK(conn.Execute("REVOKE SELECT ON t1 FROM public")); + new_version = ASSERT_RESULT(GetCatalogVersion(&conn)); + ASSERT_EQ(new_version, version + 1); + version = new_version; + + // Since yb_make_next_ddl_statement_nonincrementing auto-resets to false, we should see catalog + // version gets bumped up as before. + ASSERT_OK(conn.Execute("GRANT SELECT ON t1 TO public")); + new_version = ASSERT_RESULT(GetCatalogVersion(&conn)); + ASSERT_EQ(new_version, version + 1); + version = new_version; + + ASSERT_OK(conn.Execute("CREATE INDEX idx5 ON t1(a)")); + new_version = ASSERT_RESULT(GetCatalogVersion(&conn)); + ASSERT_EQ(new_version, version + 3); + version = new_version; + + ASSERT_OK(conn.Execute("CREATE INDEX NONCONCURRENTLY idx6 ON t1(a)")); + new_version = ASSERT_RESULT(GetCatalogVersion(&conn)); + ASSERT_EQ(new_version, version + 1); + version = new_version; + + // Now test the scenario where we create a new table, followed by create index nonconcurrently + // on the new table. Use yb_make_next_ddl_statement_nonincrementing to suppress catalog version + // increment on the create index statement. + // First create a second connection conn2.
+ auto conn2 = ASSERT_RESULT(ConnectToDB(kDatabaseName)); + + ASSERT_OK(conn.Execute("CREATE TABLE demo (a INT, b INT)")); + ASSERT_OK(conn.Execute("SET yb_make_next_ddl_statement_nonincrementing TO TRUE")); + ASSERT_OK(conn.Execute("CREATE INDEX NONCONCURRENTLY a_idx ON demo (a)")); + new_version = ASSERT_RESULT(GetCatalogVersion(&conn)); + ASSERT_EQ(new_version, version); + + // Sanity test on conn2 write, count, select and delete on the new table created on conn. + ASSERT_OK(conn2.Execute("INSERT INTO demo SELECT n, n FROM generate_series(1,100) n")); + auto row_count = ASSERT_RESULT(conn.FetchRow("SELECT COUNT(*) FROM demo")); + ASSERT_EQ(row_count, 100); + std::tuple expected_row = {50, 50}; + auto row = ASSERT_RESULT((conn2.FetchRow("SELECT * FROM demo WHERE a = 50"))); + ASSERT_EQ(row, expected_row); + ASSERT_OK(conn2.Execute("DELETE FROM demo WHERE a = 50")); + row_count = ASSERT_RESULT(conn.FetchRow("SELECT COUNT(*) FROM demo")); + ASSERT_EQ(row_count, 99); +} + TEST_F(PgCatalogVersionTest, SimulateRollingUpgrade) { // Manually switch back to non-per-db catalog version mode. RestartClusterWithoutDBCatalogVersionMode(); From a9135243439c0b57a38fff6a8050cb534bdb23bb Mon Sep 17 00:00:00 2001 From: timothy-e Date: Wed, 11 Sep 2024 11:15:05 -0400 Subject: [PATCH 45/75] [#23859] YSQL: Remove redundant Bitmap Scan filters on partial indexes Summary: One of the things that `create_bitmap_subplan` does is generates a list of conditions that are guaranteed by the tree of Bitmap Ands, Bitmap Ors, and Bitmap Index Scans. This is used to determine the clauses that remain to be checked by the Bitmap Heap Scan / YB Bitmap Table Scan. 4ea354b1b3e55e81fcd7b29fe023b037dd11b673 / D36484 introduced `yb_get_bitmap_index_quals` that just generates the list of conditions. This makes the filter information available for use during the costing phase, before the actual plan nodes need to be constructed. `create_bitmap_subplan` accounted for the clauses of partial indexes. 
`yb_get_bitmap_index_quals` missed including this part, which caused the planner to believe that the conditions that are implied by the partial index conditions still need to be rechecked. For example, ```lang=sql create table t (a int, b int); create index idx_a_partial on t (a asc) where b < 10; create index idx_b_partial on t (b asc) where a < 10; /*+ Set(yb_enable_bitmapscan true) BitmapScan(t) */ explain select * from t where b < 10 and a < 10; ``` Generates a bitmap subplan that looks as follows: ``` -> Bitmap Index Scan on idx_b_partial (cost=0.00..0.92 rows=8 width=0) Index Cond: (b < 10) ``` We know that `b < 10` is satisfied (by index condition) and `a < 10` is satisfied (by partial index clause). However, `yb_get_bitmap_index_quals` did not report the partial index clause, so it told the Bitmap Table Scan that only `b < 10` was satisfied - requiring the Bitmap Table Scan to use a Storage Filter to check `a < 10`. ``` QUERY PLAN ------------------------------------------ YB Bitmap Table Scan on t Storage Filter: (a < 10) -> Bitmap Index Scan on idx_b_partial Index Cond: (b < 10) (4 rows) ``` With the change in this diff, `yb_get_bitmap_index_quals` reports both clauses, so the Bitmap Table Scan knows that both conditions are already met and it does not need to add a remote filter. ``` QUERY PLAN ------------------------------------------ YB Bitmap Table Scan on t -> Bitmap Index Scan on idx_b_partial Index Cond: (b < 10) (3 rows) ``` **How did this regression get in?** I noticed `yb_pg_select` was failing in D36484. I saw the problem was with Storage Filters but did not pay much attention to whether the storage filters were added or removed. The diff was able to remove some redundant storage filters from other tests, so I assumed it was doing the same here.
Jira: DB-12766 Test Plan: ``` ybd --java-test 'org.yb.pgsql.TestPgRegressPgSelect#testPgRegressPgSelect' ``` Reviewers: mtakahara Reviewed By: mtakahara Subscribers: yql Differential Revision: https://phorge.dev.yugabyte.com/D37983 --- .../src/backend/optimizer/path/costsize.c | 16 ++++++++++++++++ .../src/test/regress/expected/yb_pg_select.out | 10 ++++------ 2 files changed, 20 insertions(+), 6 deletions(-) diff --git a/src/postgres/src/backend/optimizer/path/costsize.c b/src/postgres/src/backend/optimizer/path/costsize.c index 6c4dde59cc38..2f545777d7f8 100644 --- a/src/postgres/src/backend/optimizer/path/costsize.c +++ b/src/postgres/src/backend/optimizer/path/costsize.c @@ -7455,6 +7455,7 @@ yb_get_bitmap_index_quals(PlannerInfo *root, Path *bitmapqual, IndexPath *ipath = (IndexPath *) bitmapqual; IndexScan *iscan; List *indexqual; + ListCell *l; /* Use the regular indexscan plan build machinery... */ iscan = castNode(IndexScan, @@ -7469,6 +7470,21 @@ yb_get_bitmap_index_quals(PlannerInfo *root, Path *bitmapqual, indexqual = lappend(indexqual, make_ands_explicit(iscan->yb_idx_pushdown.quals)); + /* We can add any index predicate conditions, too */ + foreach(l, ipath->indexinfo->indpred) + { + Expr *pred = (Expr *) lfirst(l); + + /* + * We know that the index predicate must have been implied by the + * query condition as a whole, but it may or may not be implied by + * the conditions that got pushed into the bitmapqual. Avoid + * generating redundant conditions. 
+ */ + if (!predicate_implied_by(list_make1(pred), indexqual, false)) + indexqual = lappend(indexqual, pred); + } + pfree(iscan); return indexqual; diff --git a/src/postgres/src/test/regress/expected/yb_pg_select.out b/src/postgres/src/test/regress/expected/yb_pg_select.out index ea7878f0bcc2..2b54e3a94617 100644 --- a/src/postgres/src/test/regress/expected/yb_pg_select.out +++ b/src/postgres/src/test/regress/expected/yb_pg_select.out @@ -626,10 +626,9 @@ select unique2 from onek2 where unique2 = 11 and stringu1 < 'B'; QUERY PLAN ------------------------------------------ YB Bitmap Table Scan on onek2 - Storage Filter: (stringu1 < 'B'::name) -> Bitmap Index Scan on onek2_u2_prtl Index Cond: (unique2 = 11) -(4 rows) +(3 rows) select unique2 from onek2 where unique2 = 11 and stringu1 < 'B'; unique2 @@ -674,18 +673,17 @@ SELECT * FROM ( select unique1, unique2 from onek2 where (unique2 = 11 and stringu1 < 'B') or unique1 = 0 LIMIT ALL) ybview ORDER BY unique2; - QUERY PLAN ----------------------------------------------------------------------------------------- + QUERY PLAN +------------------------------------------------------ Sort Sort Key: onek2.unique2 -> YB Bitmap Table Scan on onek2 - Storage Filter: (((unique2 = 11) AND (stringu1 < 'B'::name)) OR (unique1 = 0)) -> BitmapOr -> Bitmap Index Scan on onek2_u2_prtl Index Cond: (unique2 = 11) -> Bitmap Index Scan on onek2_u1_prtl Index Cond: (unique1 = 0) -(9 rows) +(8 rows) SELECT * FROM ( select unique1, unique2 from onek2 From 41f5afdb9310c8e59360fc48197d6f87aab37602 Mon Sep 17 00:00:00 2001 From: Vipul Bansal Date: Mon, 9 Sep 2024 09:27:47 +0000 Subject: [PATCH 46/75] [PLAT-15097]: Delete System platform DB table while disabling ysql Summary: Add changes to drop write_read_test table, consistency_check table while disabling ysql Test Plan: Tested manually on k8s and VM-based universe and verified that table no longer exists from master UI Reviewers: #yba-api-review!, sneelakantan, svarshney, muthu Reviewed By: 
svarshney, muthu Subscribers: muthu, yugaware Differential Revision: https://phorge.dev.yugabyte.com/D37892 --- .../commissioner/tasks/UniverseTaskBase.java | 37 +++++++++- .../tasks/subtasks/DropTable.java | 70 +++++++++++++++++++ .../tasks/upgrade/ConfigureDBApis.java | 6 ++ .../upgrade/ConfigureDBApisKubernetes.java | 6 ++ .../java/com/yugabyte/yw/common/Util.java | 1 + .../yugabyte/yw/common/YsqlQueryExecutor.java | 5 ++ .../yugabyte/yw/models/helpers/TaskType.java | 2 + .../src/main/resources/swagger-strict.json | 40 +++++------ managed/src/main/resources/swagger.json | 40 +++++------ .../tasks/upgrade/ConfigureDBApisTest.java | 1 + 10 files changed, 167 insertions(+), 41 deletions(-) create mode 100644 managed/src/main/java/com/yugabyte/yw/commissioner/tasks/subtasks/DropTable.java diff --git a/managed/src/main/java/com/yugabyte/yw/commissioner/tasks/UniverseTaskBase.java b/managed/src/main/java/com/yugabyte/yw/commissioner/tasks/UniverseTaskBase.java index ff50c2fcfe6e..22aad89ff450 100644 --- a/managed/src/main/java/com/yugabyte/yw/commissioner/tasks/UniverseTaskBase.java +++ b/managed/src/main/java/com/yugabyte/yw/commissioner/tasks/UniverseTaskBase.java @@ -3,6 +3,7 @@ package com.yugabyte.yw.commissioner.tasks; import static com.yugabyte.yw.common.Util.SYSTEM_PLATFORM_DB; +import static com.yugabyte.yw.common.Util.WRITE_READ_TABLE; import static com.yugabyte.yw.common.Util.getUUIDRepresentation; import static play.mvc.Http.Status.INTERNAL_SERVER_ERROR; @@ -56,6 +57,7 @@ import com.yugabyte.yw.commissioner.tasks.subtasks.DeleteTablesFromUniverse; import com.yugabyte.yw.commissioner.tasks.subtasks.DestroyEncryptionAtRest; import com.yugabyte.yw.commissioner.tasks.subtasks.DisableEncryptionAtRest; +import com.yugabyte.yw.commissioner.tasks.subtasks.DropTable; import com.yugabyte.yw.commissioner.tasks.subtasks.EnableEncryptionAtRest; import com.yugabyte.yw.commissioner.tasks.subtasks.FreezeUniverse; import 
com.yugabyte.yw.commissioner.tasks.subtasks.HardRebootServer; @@ -1431,6 +1433,39 @@ public SubTaskGroup createChangeAdminPasswordTask( return subTaskGroup; } + private SubTaskGroup createDropTableTask( + Universe universe, CommonTypes.TableType tableType, String dbName, String tableName) { + SubTaskGroup subTaskGroup = createSubTaskGroup("DropTable"); + DropTable.Params dropTableParams = new DropTable.Params(); + dropTableParams.setUniverseUUID(universe.getUniverseUUID()); + dropTableParams.dbName = dbName; + dropTableParams.tableName = tableName; + dropTableParams.tableType = tableType; + DropTable task = createTask(DropTable.class); + task.initialize(dropTableParams); + task.setUserTaskUUID(getUserTaskUUID()); + subTaskGroup.addSubTask(task); + getRunnableTask().addSubTaskGroup(subTaskGroup); + return subTaskGroup; + } + + public void createDropSystemPlatformDBTablesTask( + Universe universe, SubTaskGroupType subTaskGroupType) { + createDropTableTask( + universe, + CommonTypes.TableType.PGSQL_TABLE_TYPE, + Util.SYSTEM_PLATFORM_DB, + Util.WRITE_READ_TABLE) + .setSubTaskGroupType(subTaskGroupType); + + createDropTableTask( + universe, + CommonTypes.TableType.PGSQL_TABLE_TYPE, + Util.SYSTEM_PLATFORM_DB, + Util.CONSISTENCY_CHECK) + .setSubTaskGroupType(subTaskGroupType); + } + public void checkAndCreateChangeAdminPasswordTask(Cluster primaryCluster) { boolean changeYCQLAdminPass = primaryCluster.userIntent.enableYCQL @@ -2770,7 +2805,7 @@ public SubTaskGroup createReadWriteTestTableTask(int numPartitions, boolean ifNo idColumn.sortOrder = SortOrder.ASC; TableDetails details = new TableDetails(); - details.tableName = "write_read_test"; + details.tableName = WRITE_READ_TABLE; details.keyspace = SYSTEM_PLATFORM_DB; details.columns = new ArrayList<>(); details.columns.add(idColumn); diff --git a/managed/src/main/java/com/yugabyte/yw/commissioner/tasks/subtasks/DropTable.java b/managed/src/main/java/com/yugabyte/yw/commissioner/tasks/subtasks/DropTable.java new file 
mode 100644 index 000000000000..a399a3dd371b --- /dev/null +++ b/managed/src/main/java/com/yugabyte/yw/commissioner/tasks/subtasks/DropTable.java @@ -0,0 +1,70 @@ +// Copyright (c) YugaByte, Inc. + +package com.yugabyte.yw.commissioner.tasks.subtasks; + +import com.yugabyte.yw.commissioner.BaseTaskDependencies; +import com.yugabyte.yw.commissioner.tasks.UniverseTaskBase; +import com.yugabyte.yw.common.PlatformServiceException; +import com.yugabyte.yw.common.YcqlQueryExecutor; +import com.yugabyte.yw.common.YsqlQueryExecutor; +import com.yugabyte.yw.forms.UniverseTaskParams; +import com.yugabyte.yw.models.Universe; +import java.time.Duration; +import javax.inject.Inject; +import lombok.extern.slf4j.Slf4j; +import org.yb.CommonTypes; + +@Slf4j +public class DropTable extends UniverseTaskBase { + @Inject + protected DropTable(BaseTaskDependencies baseTaskDependencies) { + super(baseTaskDependencies); + } + + private static long WAIT_TIME_IN_MILLIS = 5000; + + @Inject YsqlQueryExecutor ysqlQueryExecutor; + @Inject YcqlQueryExecutor ycqlQueryExecutor; + + public static class Params extends UniverseTaskParams { + public String dbName; + public String tableName; + public CommonTypes.TableType tableType; + } + + protected Params taskParams() { + return (Params) taskParams; + } + + @Override + public String getName() { + return super.getName() + "(" + taskParams().getUniverseUUID() + ")"; + } + + @Override + public void run() { + try { + log.info("Running {}", getName()); + Universe universe = Universe.getOrBadRequest(taskParams().getUniverseUUID()); + if (taskParams().tableType.equals(CommonTypes.TableType.PGSQL_TABLE_TYPE)) { + try { + ysqlQueryExecutor.dropTable(universe, taskParams().dbName, taskParams().tableName); + waitFor(Duration.ofMillis(WAIT_TIME_IN_MILLIS)); + } catch (PlatformServiceException e) { + log.error("Error dropping table: " + e.getMessage()); + throw e; + } + } else if (taskParams().tableType.equals(CommonTypes.TableType.YQL_TABLE_TYPE)) { + throw new 
UnsupportedOperationException( + "Un-implemented table type: " + taskParams().tableType); + } else { + throw new IllegalArgumentException("Unsupported table type: " + taskParams().tableType); + } + } catch (Exception e) { + String msg = getName() + " failed with exception " + e.getMessage(); + log.warn(msg, e); + throw new RuntimeException(msg, e); + } + log.info("Completed {}", getName()); + } +} diff --git a/managed/src/main/java/com/yugabyte/yw/commissioner/tasks/upgrade/ConfigureDBApis.java b/managed/src/main/java/com/yugabyte/yw/commissioner/tasks/upgrade/ConfigureDBApis.java index a0bb7f8a207b..f409e18cdb6d 100644 --- a/managed/src/main/java/com/yugabyte/yw/commissioner/tasks/upgrade/ConfigureDBApis.java +++ b/managed/src/main/java/com/yugabyte/yw/commissioner/tasks/upgrade/ConfigureDBApis.java @@ -57,6 +57,12 @@ public void run() { () -> { Universe universe = getUniverse(); + // Drop system_platform tables while disabling YSQL. + if (!taskParams().enableYSQL + && universe.getUniverseDetails().getPrimaryCluster().userIntent.enableYSQL) { + createDropSystemPlatformDBTablesTask(universe, getTaskSubGroupType()); + } + // Reset password to default before disable. createResetAPIPasswordTask(taskParams(), getTaskSubGroupType()); diff --git a/managed/src/main/java/com/yugabyte/yw/commissioner/tasks/upgrade/ConfigureDBApisKubernetes.java b/managed/src/main/java/com/yugabyte/yw/commissioner/tasks/upgrade/ConfigureDBApisKubernetes.java index 52ca851b534d..c42eb28cf485 100644 --- a/managed/src/main/java/com/yugabyte/yw/commissioner/tasks/upgrade/ConfigureDBApisKubernetes.java +++ b/managed/src/main/java/com/yugabyte/yw/commissioner/tasks/upgrade/ConfigureDBApisKubernetes.java @@ -50,6 +50,12 @@ public void run() { // by the tasks below. syncTaskParamsToUserIntent(); + // Drop system_platform.write_read_table while disabling YSQL.
+ if (!taskParams().enableYSQL + && universe.getUniverseDetails().getPrimaryCluster().userIntent.enableYSQL) { + createDropSystemPlatformDBTablesTask(universe, getTaskSubGroupType()); + } + // Reset password to default before disable. createResetAPIPasswordTask(taskParams(), getTaskSubGroupType()); diff --git a/managed/src/main/java/com/yugabyte/yw/common/Util.java b/managed/src/main/java/com/yugabyte/yw/common/Util.java index e967a23bb1bf..3f646977fa79 100644 --- a/managed/src/main/java/com/yugabyte/yw/common/Util.java +++ b/managed/src/main/java/com/yugabyte/yw/common/Util.java @@ -113,6 +113,7 @@ public class Util { public static final String REDACT = "REDACTED"; public static final String KEY_LOCATION_SUFFIX = "/backup_keys.json"; public static final String SYSTEM_PLATFORM_DB = "system_platform"; + public static final String WRITE_READ_TABLE = "write_read_table"; public static final int YB_SCHEDULER_INTERVAL = 2; public static final String DEFAULT_YB_SSH_USER = "yugabyte"; public static final String DEFAULT_SUDO_SSH_USER = "centos"; diff --git a/managed/src/main/java/com/yugabyte/yw/common/YsqlQueryExecutor.java b/managed/src/main/java/com/yugabyte/yw/common/YsqlQueryExecutor.java index 6e1e87786d4f..cdce2f380ea7 100644 --- a/managed/src/main/java/com/yugabyte/yw/common/YsqlQueryExecutor.java +++ b/managed/src/main/java/com/yugabyte/yw/common/YsqlQueryExecutor.java @@ -581,4 +581,9 @@ public void updateAdminPassword(Universe universe, DatabaseSecurityFormData data allQueries.append(query); runUserDbCommands(allQueries.toString(), data.dbName, universe); } + + public void dropTable(Universe universe, String dbName, String tableName) { + String query = String.format("DROP TABLE if exists %s;", tableName); + runUserDbCommands(query, dbName, universe); + } } diff --git a/managed/src/main/java/com/yugabyte/yw/models/helpers/TaskType.java b/managed/src/main/java/com/yugabyte/yw/models/helpers/TaskType.java index 8de707698810..43a29de97de9 100644 --- 
a/managed/src/main/java/com/yugabyte/yw/models/helpers/TaskType.java +++ b/managed/src/main/java/com/yugabyte/yw/models/helpers/TaskType.java @@ -649,6 +649,8 @@ public enum TaskType { ChangeAdminPassword(com.yugabyte.yw.commissioner.tasks.subtasks.ChangeAdminPassword.class), + DropTable(com.yugabyte.yw.commissioner.tasks.subtasks.DropTable.class), + CreateTable(com.yugabyte.yw.commissioner.tasks.subtasks.CreateTable.class), DeleteNode(com.yugabyte.yw.commissioner.tasks.subtasks.DeleteNode.class), diff --git a/managed/src/main/resources/swagger-strict.json b/managed/src/main/resources/swagger-strict.json index f2fecde253b1..3c814caf61da 100644 --- a/managed/src/main/resources/swagger-strict.json +++ b/managed/src/main/resources/swagger-strict.json @@ -1683,7 +1683,7 @@ "type" : "boolean" }, "updatingTask" : { - "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", "RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", "RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", 
"ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", "DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", "UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", "LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", "ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "CreateTable", "DeleteNode", 
"DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", "CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", "UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", "WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", "DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", "XClusterConfigSetup", "XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", "XClusterDbReplicationSetup", 
"DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", "CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", "RestoreUniverseKeys", "RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", "UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", "StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", "InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", "CheckLeaderlessTablets", 
"CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], + "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", "RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", "RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", "ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", "DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", 
"CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", "UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", "LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", "ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "DropTable", "CreateTable", "DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", "CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", "UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", 
"VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", "WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", "DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", "XClusterConfigSetup", "XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", "XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", "CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", "RestoreUniverseKeys", "RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", 
"DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", "UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", "StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", "InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", "CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], "type" : "string" }, "updatingTaskUUID" : { @@ -3566,7 +3566,7 @@ "type" : "boolean" }, "updatingTask" : { - "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", 
"RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", "RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", "ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", "DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", "UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", 
"LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", "ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "CreateTable", "DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", "CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", "UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", "WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", 
"DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", "XClusterConfigSetup", "XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", "XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", "CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", "RestoreUniverseKeys", "RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", "UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", 
"StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", "InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", "CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], + "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", "RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", "RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", 
"UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", "ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", "DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", "UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", "LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", "ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", 
"AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "DropTable", "CreateTable", "DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", "CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", "UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", "WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", "DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", "XClusterConfigSetup", "XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", 
"XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", "XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", "CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", "RestoreUniverseKeys", "RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", "UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", "StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", "InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", 
"UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", "CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], "type" : "string" }, "updatingTaskUUID" : { @@ -5626,7 +5626,7 @@ "type" : "boolean" }, "updatingTask" : { - "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", "RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", "RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", "ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", "DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", 
"RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", "UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", "LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", "ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "CreateTable", "DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", "CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", 
"UniverseUpdateSucceeded", "UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", "WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", "DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", "XClusterConfigSetup", "XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", "XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", "CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", 
"RestoreUniverseKeys", "RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", "UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", "StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", "InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", "CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], + "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", 
"ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", "RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", "RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", "ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", "DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", "UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", 
"CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", "LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", "ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "DropTable", "CreateTable", "DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", "CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", "UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", "WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", 
"ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", "DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", "XClusterConfigSetup", "XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", "XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", "CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", "RestoreUniverseKeys", "RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", "UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", 
"WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", "StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", "InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", "CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], "type" : "string" }, "updatingTaskUUID" : { @@ -5950,7 +5950,7 @@ "type" : "boolean" }, "updatingTask" : { - "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", "RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", 
"RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", "ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", "DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", "UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", "LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", 
"ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "CreateTable", "DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", "CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", "UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", "WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", "DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", 
"XClusterConfigSetup", "XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", "XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", "CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", "RestoreUniverseKeys", "RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", "UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", "StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", 
"InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", "CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], + "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", "RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", "RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", "ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", 
"DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", "UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", "LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", "ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "DropTable", "CreateTable", "DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", 
"CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", "UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", "WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", "DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", "XClusterConfigSetup", "XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", "XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", 
"CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", "RestoreUniverseKeys", "RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", "UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", "StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", "InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", "CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], "type" : "string" }, "updatingTaskUUID" : { @@ -6805,7 +6805,7 
@@ "type" : "boolean" }, "updatingTask" : { - "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", "RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", "RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", "ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", "DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", 
"UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", "LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", "ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "CreateTable", "DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", "CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", "UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", 
"WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", "DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", "XClusterConfigSetup", "XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", "XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", "CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", "RestoreUniverseKeys", "RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", 
"UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", "StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", "InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", "CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], + "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", "RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", 
"AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", "RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", "ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", "DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", "UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", "LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", 
"KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", "ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "DropTable", "CreateTable", "DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", "CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", "UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", "WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", "DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", 
"XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", "XClusterConfigSetup", "XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", "XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", "CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", "RestoreUniverseKeys", "RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", "UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", "StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", 
"MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", "InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", "CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], "type" : "string" }, "updatingTaskUUID" : { @@ -9823,7 +9823,7 @@ "type" : "boolean" }, "updatingTask" : { - "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", "RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", "RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", 
"RebootNodeInUniverse", "ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", "DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", "UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", "LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", "ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "CreateTable", 
"DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", "CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", "UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", "WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", "DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", "XClusterConfigSetup", "XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", 
"XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", "CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", "RestoreUniverseKeys", "RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", "UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", "StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", "InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", 
"CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], + "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", "RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", "RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", "ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", "DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", 
"ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", "UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", "LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", "ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "DropTable", "CreateTable", "DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", "CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", "UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", 
"VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", "WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", "DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", "XClusterConfigSetup", "XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", "XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", "CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", "RestoreUniverseKeys", "RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", 
"DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", "UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", "StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", "InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", "CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], "type" : "string" }, "updatingTaskUUID" : { @@ -10373,7 +10373,7 @@ "type" : "boolean" }, "updatingTask" : { - "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", 
"RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", "RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", "ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", "DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", "UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", 
"LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", "ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "CreateTable", "DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", "CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", "UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", "WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", 
"DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", "XClusterConfigSetup", "XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", "XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", "CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", "RestoreUniverseKeys", "RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", "UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", 
"StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", "InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", "CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], + "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", "RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", "RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", 
"UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", "ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", "DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", "UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", "LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", "ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", 
"AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "DropTable", "CreateTable", "DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", "CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", "UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", "WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", "DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", "XClusterConfigSetup", "XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", 
"XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", "XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", "CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", "RestoreUniverseKeys", "RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", "UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", "StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", "InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", 
"UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", "CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], "type" : "string" }, "updatingTaskUUID" : { @@ -10743,7 +10743,7 @@ "type" : "boolean" }, "updatingTask" : { - "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", "RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", "RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", "ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", "DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", 
"RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", "UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", "LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", "ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "CreateTable", "DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", "CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", 
"UniverseUpdateSucceeded", "UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", "WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", "DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", "XClusterConfigSetup", "XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", "XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", "CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", 
"RestoreUniverseKeys", "RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", "UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", "StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", "InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", "CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], + "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", 
"ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", "RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", "RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", "ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", "DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", "UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", 
"CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", "LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", "ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "DropTable", "CreateTable", "DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", "CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", "UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", "WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", 
"ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", "DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", "XClusterConfigSetup", "XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", "XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", "CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", "RestoreUniverseKeys", "RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", "UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", 
"WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", "StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", "InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", "CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], "type" : "string" }, "updatingTaskUUID" : { @@ -11729,7 +11729,7 @@ "type" : "boolean" }, "updatingTask" : { - "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", "RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", 
"RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", "ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", "DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", "UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", "LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", 
"ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "CreateTable", "DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", "CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", "UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", "WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", "DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", 
"XClusterConfigSetup", "XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", "XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", "CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", "RestoreUniverseKeys", "RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", "UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", "StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", 
"InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", "CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], + "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", "RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", "RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", "ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", 
"DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", "UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", "LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", "ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "DropTable", "CreateTable", "DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", 
"CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", "UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", "WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", "DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", "XClusterConfigSetup", "XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", "XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", 
"CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", "RestoreUniverseKeys", "RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", "UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", "StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", "InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", "CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], "type" : "string" }, "updatingTaskUUID" : { @@ -11882,7 +11882,7 
@@ }, "taskType" : { "description" : "Type of task to be scheduled.", - "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", "RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", "RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", "ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", "DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", 
"InstallYbcSoftware", "UpgradeUniverseYbc", "UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", "LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", "ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "CreateTable", "DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", "CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", "UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", 
"WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", "WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", "DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", "XClusterConfigSetup", "XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", "XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", "CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", "RestoreUniverseKeys", "RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", 
"SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", "UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", "StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", "InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", "CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], + "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", "RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", "EditKubernetesUniverse", 
"ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", "RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", "ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", "DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", "UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", "LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", "RestoreContinuousBackup", 
"EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", "ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "DropTable", "CreateTable", "DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", "CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", "UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", "WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", "DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", 
"XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", "XClusterConfigSetup", "XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", "XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", "CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", "RestoreUniverseKeys", "RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", "UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", "StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", 
"ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", "InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", "CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], "type" : "string" }, "userEmail" : { @@ -11905,7 +11905,7 @@ }, "taskTypes" : { "items" : { - "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", "RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", "RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", 
"RemoveNodeFromUniverse", "RebootNodeInUniverse", "ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", "DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", "UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", "LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", "ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", 
"ChangeAdminPassword", "CreateTable", "DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", "CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", "UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", "WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", "DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", "XClusterConfigSetup", "XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", 
"CreateOutboundReplicationGroup", "XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", "CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", "RestoreUniverseKeys", "RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", "UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", "StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", "InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", 
"CheckForClusterServers", "CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], + "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", "RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", "RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", "ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", "DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", 
"DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", "UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", "LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", "ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "DropTable", "CreateTable", "DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", "CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", "UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", 
"UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", "WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", "DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", "XClusterConfigSetup", "XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", "XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", "CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", "RestoreUniverseKeys", "RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", 
"DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", "UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", "StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", "InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", "CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], "type" : "string" }, "type" : "array", @@ -12294,7 +12294,7 @@ "type" : "boolean" }, "updatingTask" : { - "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", 
"BackupUniverse", "RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", "RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", "ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", "DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", "UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", 
"RollbackKubernetesUpgrade", "LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", "ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "CreateTable", "DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", "CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", "UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", "WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", 
"DeleteXClusterConfigEntry", "DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", "XClusterConfigSetup", "XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", "XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", "CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", "RestoreUniverseKeys", "RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", "UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", 
"RollbackAutoFlags", "StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", "InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", "CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], + "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", "RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", "RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", 
"RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", "ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", "DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", "UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", "LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", "ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", 
"AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "DropTable", "CreateTable", "DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", "CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", "UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", "WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", "DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", "XClusterConfigSetup", "XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", 
"XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", "XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", "CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", "RestoreUniverseKeys", "RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", "UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", "StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", "InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", 
"UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", "CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], "type" : "string" }, "updatingTaskUUID" : { @@ -12863,7 +12863,7 @@ "type" : "boolean" }, "updatingTask" : { - "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", "RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", "RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", "ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", "DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", 
"RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", "UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", "LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", "ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "CreateTable", "DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", "CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", 
"SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", "UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", "WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", "DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", "XClusterConfigSetup", "XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", "XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", "CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", 
"RestoreBackupYbc", "RestoreUniverseKeys", "RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", "UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", "StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", "InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", "CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], + "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", 
"CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", "RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", "RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", "ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", "DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", "UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", "EditBackupSchedule", 
"EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", "LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", "ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "DropTable", "CreateTable", "DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", "CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", "UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", "WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", "RemoveAuthorizedKey", 
"UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", "DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", "XClusterConfigSetup", "XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", "XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", "CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", "RestoreUniverseKeys", "RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", "UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", "CheckMasterLeader", 
"CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", "StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", "InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", "CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], "type" : "string" }, "updatingTaskUUID" : { @@ -13463,7 +13463,7 @@ "type" : "boolean" }, "updatingTask" : { - "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", "RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", 
"RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", "ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", "DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", "UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", "LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", 
"ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "CreateTable", "DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", "CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", "UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", "WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", "DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", 
"XClusterConfigSetup", "XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", "XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", "CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", "RestoreUniverseKeys", "RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", "UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", "StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", 
"InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", "CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], + "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", "RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", "RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", "ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", 
"DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", "UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", "LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", "ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "DropTable", "CreateTable", "DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", 
"CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", "UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", "WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", "DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", "XClusterConfigSetup", "XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", "XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", 
"CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", "RestoreUniverseKeys", "RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", "UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", "StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", "InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", "CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], "type" : "string" }, "updatingTaskUUID" : { @@ -13719,7 +13719,7 
@@ "type" : "boolean" }, "updatingTask" : { - "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", "RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", "RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", "ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", "DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", 
"UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", "LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", "ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "CreateTable", "DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", "CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", "UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", 
"WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", "DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", "XClusterConfigSetup", "XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", "XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", "CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", "RestoreUniverseKeys", "RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", 
"UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", "StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", "InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", "CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], + "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", "RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", 
"AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", "RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", "ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", "DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", "UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", "LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", 
"KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", "ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "DropTable", "CreateTable", "DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", "CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", "UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", "WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", "DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", 
"XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", "XClusterConfigSetup", "XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", "XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", "CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", "RestoreUniverseKeys", "RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", "UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", "StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", 
"MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", "InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", "CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], "type" : "string" }, "updatingTaskUUID" : { @@ -14180,7 +14180,7 @@ "type" : "boolean" }, "updatingTask" : { - "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", "RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", "RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", 
"RebootNodeInUniverse", "ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", "DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", "UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", "LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", "ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "CreateTable", 
"DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", "CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", "UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", "WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", "DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", "XClusterConfigSetup", "XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", 
"XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", "CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", "RestoreUniverseKeys", "RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", "UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", "StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", "InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", 
"CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], + "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", "RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", "RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", "ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", "DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", 
"ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", "UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", "LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", "ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "DropTable", "CreateTable", "DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", "CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", "UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", 
"VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", "WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", "DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", "XClusterConfigSetup", "XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", "XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", "CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", "RestoreUniverseKeys", "RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", 
"DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", "UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", "StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", "InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", "CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], "type" : "string" }, "updatingTaskUUID" : { @@ -14410,7 +14410,7 @@ "type" : "boolean" }, "updatingTask" : { - "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", 
"RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", "RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", "ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", "DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", "UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", 
"LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", "ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "CreateTable", "DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", "CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", "UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", "WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", 
"DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", "XClusterConfigSetup", "XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", "XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", "CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", "RestoreUniverseKeys", "RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", "UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", 
"StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", "InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", "CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], + "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", "RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", "RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", 
"UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", "ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", "DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", "UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", "LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", "ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", 
"AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "DropTable", "CreateTable", "DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", "CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", "UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", "WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", "DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", "XClusterConfigSetup", "XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", 
"XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", "XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", "CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", "RestoreUniverseKeys", "RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", "UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", "StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", "InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", 
"UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", "CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], "type" : "string" }, "updatingTaskUUID" : { @@ -14640,7 +14640,7 @@ "type" : "boolean" }, "updatingTask" : { - "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", "RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", "RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", "ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", "DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", 
"RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", "UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", "LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", "ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "CreateTable", "DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", "CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", 
"UniverseUpdateSucceeded", "UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", "WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", "DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", "XClusterConfigSetup", "XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", "XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", "CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", 
"RestoreUniverseKeys", "RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", "UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", "StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", "InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", "CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], + "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", 
"ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", "RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", "RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", "ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", "DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", "UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", 
"CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", "LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", "ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "DropTable", "CreateTable", "DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", "CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", "UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", "WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", 
"ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", "DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", "XClusterConfigSetup", "XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", "XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", "CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", "RestoreUniverseKeys", "RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", "UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", 
"WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", "StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", "InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", "CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], "type" : "string" }, "updatingTaskUUID" : { @@ -15130,7 +15130,7 @@ "type" : "boolean" }, "updatingTask" : { - "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", "RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", 
"RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", "ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", "DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", "UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", "LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", 
"ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "CreateTable", "DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", "CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", "UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", "WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", "DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", 
"XClusterConfigSetup", "XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", "XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", "CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", "RestoreUniverseKeys", "RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", "UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", "StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", 
"InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", "CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], + "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", "RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", "RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", "ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", 
"DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", "UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", "LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", "ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "DropTable", "CreateTable", "DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", 
"CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", "UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", "WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", "DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", "XClusterConfigSetup", "XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", "XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", 
"CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", "RestoreUniverseKeys", "RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", "UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", "StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", "InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", "CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], "type" : "string" }, "updatingTaskUUID" : { @@ -15810,7 +15810,7 
@@ "type" : "boolean" }, "updatingTask" : { - "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", "RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", "RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", "ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", "DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", 
"UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", "LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", "ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "CreateTable", "DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", "CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", "UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", 
"WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", "DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", "XClusterConfigSetup", "XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", "XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", "CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", "RestoreUniverseKeys", "RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", 
"UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", "StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", "InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", "CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], + "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", "RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", 
"AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", "RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", "ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", "DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", "UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", "LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", 
"KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", "ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "DropTable", "CreateTable", "DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", "CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", "UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", "WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", "DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", 
"XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", "XClusterConfigSetup", "XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", "XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", "CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", "RestoreUniverseKeys", "RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", "UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", "StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", 
"MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", "InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", "CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], "type" : "string" }, "updatingTaskUUID" : { diff --git a/managed/src/main/resources/swagger.json b/managed/src/main/resources/swagger.json index 15c20fd2fac1..d7ed48845fcd 100644 --- a/managed/src/main/resources/swagger.json +++ b/managed/src/main/resources/swagger.json @@ -1695,7 +1695,7 @@ "type" : "boolean" }, "updatingTask" : { - "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", "RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", "RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", 
"TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", "ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", "DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", "UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", "LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", "ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", 
"PauseServer", "ResumeServer", "AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "CreateTable", "DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", "CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", "UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", "WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", "DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", "XClusterConfigSetup", "XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", 
"XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", "XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", "CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", "RestoreUniverseKeys", "RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", "UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", "StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", "InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", 
"UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", "CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], + "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", "RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", "RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", "ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", "DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", 
"CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", "UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", "LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", "ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "DropTable", "CreateTable", "DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", "CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", "UpdateAndPersistGFlags", 
"UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", "WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", "DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", "XClusterConfigSetup", "XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", "XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", "CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", "RestoreUniverseKeys", "RestoreUniverseKeysYb", 
"RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", "UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", "StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", "InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", "CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], "type" : "string" }, "updatingTaskUUID" : { @@ -3601,7 +3601,7 @@ "type" : "boolean" }, "updatingTask" : { - "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", 
"ReadOnlyClusterDelete", "CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", "RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", "RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", "ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", "DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", "UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", 
"EditBackupSchedule", "EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", "LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", "ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "CreateTable", "DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", "CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", "UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", "WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", 
"RemoveAuthorizedKey", "UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", "DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", "XClusterConfigSetup", "XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", "XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", "CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", "RestoreUniverseKeys", "RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", "UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", 
"CheckMasterLeader", "CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", "StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", "InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", "CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], + "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", "RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", "RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", 
"KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", "ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", "DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", "UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", "LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", "ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", 
"UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "DropTable", "CreateTable", "DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", "CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", "UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", "WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", "DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", "XClusterConfigSetup", "XClusterConfigSetStatus", 
"XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", "XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", "CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", "RestoreUniverseKeys", "RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", "UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", "StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", "InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", 
"InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", "CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], "type" : "string" }, "updatingTaskUUID" : { @@ -5661,7 +5661,7 @@ "type" : "boolean" }, "updatingTask" : { - "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", "RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", "RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", "ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", 
"UpdateKubernetesDiskSize", "StartMasterOnNode", "DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", "UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", "LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", "ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "CreateTable", "DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", 
"CheckUnderReplicatedTablets", "CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", "UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", "WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", "DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", "XClusterConfigSetup", "XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", "XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", 
"CloudProviderCleanup", "CloudRegionCleanup", "CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", "RestoreUniverseKeys", "RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", "UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", "StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", "InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", "CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], + "enum" : [ 
"CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", "RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", "RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", "ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", "DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", "UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", 
"CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", "LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", "ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "DropTable", "CreateTable", "DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", "CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", "UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", "WaitForYbcServer", 
"WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", "DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", "XClusterConfigSetup", "XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", "XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", "CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", "RestoreUniverseKeys", "RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", "UnivSetCertificate", 
"CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", "StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", "InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", "CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], "type" : "string" }, "updatingTaskUUID" : { @@ -5989,7 +5989,7 @@ "type" : "boolean" }, "updatingTask" : { - "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", "RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", 
"EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", "RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", "ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", "DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", "UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", "LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", 
"RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", "ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "CreateTable", "DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", "CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", "UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", "WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", "DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", 
"XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", "XClusterConfigSetup", "XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", "XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", "CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", "RestoreUniverseKeys", "RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", "UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", "StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", 
"ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", "InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", "CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], + "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", "RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", "RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", "ReleaseInstanceFromUniverse", 
"RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", "DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", "UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", "LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", "ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "DropTable", "CreateTable", "DeleteNode", "DeleteBackup", 
"DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", "CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", "UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", "WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", "DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", "XClusterConfigSetup", "XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", "XClusterDbReplicationSetup", 
"DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", "CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", "RestoreUniverseKeys", "RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", "UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", "StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", "InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", "CheckLeaderlessTablets", 
"CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], "type" : "string" }, "updatingTaskUUID" : { @@ -6848,7 +6848,7 @@ "type" : "boolean" }, "updatingTask" : { - "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", "RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", "RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", "ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", "DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", 
"EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", "UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", "LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", "ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "CreateTable", "DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", "CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", "UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", 
"UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", "WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", "DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", "XClusterConfigSetup", "XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", "XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", "CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", "RestoreUniverseKeys", "RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", 
"WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", "UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", "StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", "InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", "CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], + "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", "RestoreBackup", 
"MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", "RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", "ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", "DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", "UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", "LdapUniverseSync", 
"ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", "ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "DropTable", "CreateTable", "DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", "CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", "UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", "WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", 
"DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", "XClusterConfigSetup", "XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", "XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", "CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", "RestoreUniverseKeys", "RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", "UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", 
"StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", "InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", "CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], "type" : "string" }, "updatingTaskUUID" : { @@ -9914,7 +9914,7 @@ "type" : "boolean" }, "updatingTask" : { - "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", "RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", "RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", 
"CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", "ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", "DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", "UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", "LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", "ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", 
"AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "CreateTable", "DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", "CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", "UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", "WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", "DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", "XClusterConfigSetup", "XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", 
"XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", "XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", "CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", "RestoreUniverseKeys", "RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", "UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", "StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", "InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", 
"UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", "CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], + "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", "RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", "RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", "ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", "DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", 
"SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", "UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", "LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", "ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "DropTable", "CreateTable", "DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", "CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", 
"UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", "WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", "DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", "XClusterConfigSetup", "XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", "XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", "CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", "RestoreUniverseKeys", 
"RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", "UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", "StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", "InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", "CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], "type" : "string" }, "updatingTaskUUID" : { @@ -10477,7 +10477,7 @@ "type" : "boolean" }, "updatingTask" : { - "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", 
"ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", "RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", "RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", "ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", "DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", "UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", 
"DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", "LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", "ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "CreateTable", "DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", "CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", "UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", "WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", 
"RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", "DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", "XClusterConfigSetup", "XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", "XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", "CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", "RestoreUniverseKeys", "RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", "UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", 
"SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", "StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", "InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", "CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], + "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", "RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", "RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", 
"SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", "ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", "DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", "UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", "LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", "ReplaceRootVolume", "ChangeInstanceType", 
"PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "DropTable", "CreateTable", "DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", "CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", "UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", "WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", "DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", "XClusterConfigSetup", 
"XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", "XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", "CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", "RestoreUniverseKeys", "RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", "UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", "StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", "InstallYbcSoftwareOnK8s", 
"InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", "CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], "type" : "string" }, "updatingTaskUUID" : { @@ -10847,7 +10847,7 @@ "type" : "boolean" }, "updatingTask" : { - "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", "RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", "RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", "ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", 
"DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", "DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", "UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", "LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", "ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "CreateTable", "DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", 
"LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", "CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", "UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", "WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", "DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", "XClusterConfigSetup", "XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", "XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", 
"CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", "CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", "RestoreUniverseKeys", "RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", "UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", "StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", "InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", "CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", 
"UpdateUniverseFields", "RunNodeCommand" ], + "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", "RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", "RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", "ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", "DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", 
"UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", "LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", "ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "DropTable", "CreateTable", "DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", "CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", "UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", 
"WaitForServer", "WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", "DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", "XClusterConfigSetup", "XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", "XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", "CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", "RestoreUniverseKeys", "RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", 
"UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", "StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", "InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", "CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], "type" : "string" }, "updatingTaskUUID" : { @@ -11841,7 +11841,7 @@ "type" : "boolean" }, "updatingTask" : { - "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", "RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", 
"EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", "RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", "ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", "DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", "UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", "LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", 
"RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", "ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "CreateTable", "DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", "CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", "UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", "WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", "DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", 
"UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", "XClusterConfigSetup", "XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", "XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", "CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", "RestoreUniverseKeys", "RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", "UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", "StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", 
"CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", "InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", "CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], + "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", "RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", "RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", 
"ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", "DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", "UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", "LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", "ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "DropTable", "CreateTable", 
"DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", "CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", "UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", "WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", "DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", "XClusterConfigSetup", "XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", 
"XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", "CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", "RestoreUniverseKeys", "RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", "UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", "StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", "InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", 
"CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], "type" : "string" }, "updatingTaskUUID" : { @@ -11994,7 +11994,7 @@ }, "taskType" : { "description" : "Type of task to be scheduled.", - "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", "RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", "RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", "ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", "DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", 
"SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", "UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", "LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", "ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "CreateTable", "DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", "CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", "UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", 
"UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", "WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", "DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", "XClusterConfigSetup", "XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", "XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", "CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", "RestoreUniverseKeys", "RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", 
"RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", "UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", "StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", "InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", "CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], + "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", 
"DeleteTable", "BackupUniverse", "RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", "RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", "ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", "DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", "UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", 
"RollbackKubernetesUpgrade", "LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", "ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "DropTable", "CreateTable", "DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", "CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", "UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", "WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", 
"DeleteXClusterConfigEntry", "DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", "XClusterConfigSetup", "XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", "XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", "CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", "RestoreUniverseKeys", "RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", "UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", 
"RollbackAutoFlags", "StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", "InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", "CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], "type" : "string" }, "userEmail" : { @@ -12017,7 +12017,7 @@ }, "taskTypes" : { "items" : { - "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", "RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", "RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", 
"CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", "ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", "DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", "UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", "LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", "ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", 
"AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "CreateTable", "DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", "CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", "UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", "WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", "DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", "XClusterConfigSetup", "XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", 
"XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", "XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", "CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", "RestoreUniverseKeys", "RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", "UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", "StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", "InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", 
"UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", "CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], + "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", "RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", "RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", "ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", "DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", 
"SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", "UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", "LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", "ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "DropTable", "CreateTable", "DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", "CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", 
"UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", "WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", "DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", "XClusterConfigSetup", "XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", "XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", "CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", "RestoreUniverseKeys", 
"RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", "UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", "StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", "InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", "CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], "type" : "string" }, "type" : "array", @@ -12406,7 +12406,7 @@ "type" : "boolean" }, "updatingTask" : { - "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", 
"ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", "RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", "RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", "ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", "DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", "UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", 
"DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", "LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", "ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "CreateTable", "DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", "CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", "UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", "WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", 
"RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", "DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", "XClusterConfigSetup", "XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", "XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", "CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", "RestoreUniverseKeys", "RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", "UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", 
"SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", "StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", "InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", "CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], + "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", "RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", "RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", 
"SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", "ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", "DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", "UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", "LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", "ReplaceRootVolume", "ChangeInstanceType", 
"PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "DropTable", "CreateTable", "DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", "CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", "UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", "WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", "DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", "XClusterConfigSetup", 
"XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", "XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", "CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", "RestoreUniverseKeys", "RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", "UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", "StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", "InstallYbcSoftwareOnK8s", 
"InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", "CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], "type" : "string" }, "updatingTaskUUID" : { @@ -12975,7 +12975,7 @@ "type" : "boolean" }, "updatingTask" : { - "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", "RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", "RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", "ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", 
"DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", "DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", "UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", "LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", "ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "CreateTable", "DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", 
"LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", "CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", "UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", "WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", "DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", "XClusterConfigSetup", "XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", "XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", 
"CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", "CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", "RestoreUniverseKeys", "RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", "UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", "StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", "InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", "CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", 
"UpdateUniverseFields", "RunNodeCommand" ], + "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", "RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", "RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", "ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", "DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", 
"UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", "LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", "ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "DropTable", "CreateTable", "DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", "CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", "UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", 
"WaitForServer", "WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", "DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", "XClusterConfigSetup", "XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", "XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", "CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", "RestoreUniverseKeys", "RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", 
"UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", "StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", "InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", "CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], "type" : "string" }, "updatingTaskUUID" : { @@ -13575,7 +13575,7 @@ "type" : "boolean" }, "updatingTask" : { - "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", "RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", 
"EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", "RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", "ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", "DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", "UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", "LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", 
"RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", "ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "CreateTable", "DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", "CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", "UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", "WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", "DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", 
"UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", "XClusterConfigSetup", "XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", "XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", "CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", "RestoreUniverseKeys", "RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", "UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", "StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", 
"CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", "InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", "CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], + "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", "RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", "RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", 
"ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", "DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", "UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", "LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", "ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "DropTable", "CreateTable", 
"DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", "CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", "UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", "WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", "DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", "XClusterConfigSetup", "XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", 
"XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", "CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", "RestoreUniverseKeys", "RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", "UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", "StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", "InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", 
"CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], "type" : "string" }, "updatingTaskUUID" : { @@ -13831,7 +13831,7 @@ "type" : "boolean" }, "updatingTask" : { - "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", "RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", "RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", "ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", "DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", 
"EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", "UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", "LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", "ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "CreateTable", "DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", "CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", "UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", 
"UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", "WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", "DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", "XClusterConfigSetup", "XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", "XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", "CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", "RestoreUniverseKeys", "RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", 
"BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", "UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", "StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", "InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", "CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], + "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", 
"RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", "RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", "ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", "DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", "UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", 
"LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", "ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "DropTable", "CreateTable", "DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", "CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", "UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", "WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", 
"DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", "XClusterConfigSetup", "XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", "XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", "CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", "RestoreUniverseKeys", "RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", "UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", 
"StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", "InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", "CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], "type" : "string" }, "updatingTaskUUID" : { @@ -14292,7 +14292,7 @@ "type" : "boolean" }, "updatingTask" : { - "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", "RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", "RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", 
"CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", "ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", "DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", "UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", "LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", "ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", 
"AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "CreateTable", "DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", "CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", "UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", "WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", "DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", "XClusterConfigSetup", "XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", 
"XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", "XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", "CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", "RestoreUniverseKeys", "RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", "UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", "StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", "InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", 
"UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", "CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], + "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", "RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", "RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", "ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", "DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", 
"SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", "UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", "LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", "ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "DropTable", "CreateTable", "DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", "CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", 
"UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", "WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", "DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", "XClusterConfigSetup", "XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", "XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", "CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", "RestoreUniverseKeys", 
"RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", "UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", "StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", "InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", "CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], "type" : "string" }, "updatingTaskUUID" : { @@ -14522,7 +14522,7 @@ "type" : "boolean" }, "updatingTask" : { - "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", 
"ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", "RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", "RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", "ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", "DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", "UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", 
"DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", "LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", "ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "CreateTable", "DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", "CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", "UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", "WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", 
"RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", "DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", "XClusterConfigSetup", "XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", "XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", "CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", "RestoreUniverseKeys", "RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", "UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", 
"SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", "StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", "InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", "CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], + "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", "RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", "RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", 
"SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", "ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", "DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", "UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", "LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", "ReplaceRootVolume", "ChangeInstanceType", 
"PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "DropTable", "CreateTable", "DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", "CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", "UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", "WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", "DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", "XClusterConfigSetup", 
"XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", "XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", "CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", "RestoreUniverseKeys", "RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", "UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", "StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", "InstallYbcSoftwareOnK8s", 
"InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", "CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], "type" : "string" }, "updatingTaskUUID" : { @@ -14752,7 +14752,7 @@ "type" : "boolean" }, "updatingTask" : { - "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", "RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", "RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", "ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", 
"DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", "DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", "UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", "LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", "ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "CreateTable", "DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", 
"LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", "CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", "UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", "WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", "DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", "XClusterConfigSetup", "XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", "XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", 
"CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", "CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", "RestoreUniverseKeys", "RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", "UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", "StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", "InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", "CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", 
"UpdateUniverseFields", "RunNodeCommand" ], + "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", "RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", "RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", "ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", "DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", 
"UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", "LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", "ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "DropTable", "CreateTable", "DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", "CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", "UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", 
"WaitForServer", "WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", "DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", "XClusterConfigSetup", "XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", "XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", "CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", "RestoreUniverseKeys", "RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", 
"UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", "StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", "InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", "CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], "type" : "string" }, "updatingTaskUUID" : { @@ -15242,7 +15242,7 @@ "type" : "boolean" }, "updatingTask" : { - "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", "RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", 
"EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", "RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", "ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", "DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", "UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", "LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", 
"RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", "ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "CreateTable", "DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", "CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", "UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", "WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", "DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", 
"UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", "XClusterConfigSetup", "XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", "XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", "CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", "RestoreUniverseKeys", "RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", "UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", "StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", 
"CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", "InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", "CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], + "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", "RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", "RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", 
"ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", "DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", "UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", "LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", "ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "DropTable", "CreateTable", 
"DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", "CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", "UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", "WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", "DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", "XClusterConfigSetup", "XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", 
"XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", "CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", "RestoreUniverseKeys", "RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", "UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", "StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", "InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", 
"CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], "type" : "string" }, "updatingTaskUUID" : { @@ -15963,7 +15963,7 @@ "type" : "boolean" }, "updatingTask" : { - "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", "RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", "RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", "ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", "DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", 
"EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", "UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", "LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", "ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "CreateTable", "DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", "CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", "UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", 
"UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", "WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", "DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", "XClusterConfigSetup", "XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", "XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", "CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", "RestoreUniverseKeys", "RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", 
"BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", "UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", "StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", "InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", "CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], + "enum" : [ "CloudBootstrap", "CloudCleanup", "CreateCassandraTable", "CreateUniverse", "ReadOnlyClusterCreate", "ReadOnlyKubernetesClusterDelete", "ReadOnlyClusterDelete", "CreateKubernetesUniverse", "ReadOnlyKubernetesClusterCreate", "DestroyUniverse", "PauseUniverse", "ResumeUniverse", "DestroyKubernetesUniverse", "DeleteTable", "BackupUniverse", 
"RestoreBackup", "MultiTableBackup", "CreateBackup", "ConfigureDBApis", "ConfigureDBApisKubernetes", "CreatePitrConfig", "DeletePitrConfig", "RestoreSnapshotSchedule", "EditUniverse", "EditKubernetesUniverse", "ExternalScript", "ImportIntoTable", "RunApiTriggeredHooks", "AddOnClusterCreate", "AddOnClusterDelete", "UpgradeUniverse", "UpdateLoadBalancerConfig", "RestartUniverse", "RestartUniverseKubernetesUpgrade", "SoftwareUpgrade", "SoftwareKubernetesUpgrade", "KubernetesOverridesUpgrade", "GFlagsUpgrade", "GFlagsKubernetesUpgrade", "CertsRotate", "CertsRotateKubernetesUpgrade", "TlsToggle", "VMImageUpgrade", "SystemdUpgrade", "RebootUniverse", "UpgradeKubernetesUniverse", "DeleteNodeFromUniverse", "StopNodeInUniverse", "StartNodeInUniverse", "AddNodeToUniverse", "RemoveNodeFromUniverse", "RebootNodeInUniverse", "ReleaseInstanceFromUniverse", "RotateAccessKey", "SetUniverseKey", "CreateAndRotateAccessKey", "CreateKMSConfig", "EditKMSConfig", "DeleteKMSConfig", "UpdateDiskSize", "UpdateKubernetesDiskSize", "StartMasterOnNode", "DeleteXClusterConfig", "SyncXClusterConfig", "CreateSupportBundle", "CreateXClusterConfig", "EditXClusterConfig", "RestartXClusterConfig", "RestartDrConfig", "SyncDrConfig", "SetTablesDrConfig", "SetDatabasesDrConfig", "CreateDrConfig", "DeleteDrConfig", "FailoverDrConfig", "SwitchoverDrConfig", "EditDrConfig", "EditDrConfigParams", "ReinstallNodeAgent", "DeleteCustomerConfig", "DeleteCustomerStorageConfig", "ResizeNode", "CreateTableSpacesInUniverse", "ThirdpartySoftwareUpgrade", "ModifyAuditLoggingConfig", "InstallYbcSoftware", "UpgradeUniverseYbc", "UpgradeYbcGFlags", "DisableYbc", "AddGFlagMetadata", "CloudProviderDelete", "CreateBackupSchedule", "CreateBackupScheduleKubernetes", "DeleteBackupSchedule", "DeleteBackupScheduleKubernetes", "EditBackupSchedule", "EditBackupScheduleKubernetes", "CloudProviderEdit", "SoftwareUpgradeYB", "SoftwareKubernetesUpgradeYB", "FinalizeUpgrade", "RollbackUpgrade", "RollbackKubernetesUpgrade", 
"LdapUniverseSync", "ReprovisionNode", "ReplaceNodeInUniverse", "UpdateProxyConfig", "RecommissionNodeInstance", "MasterFailover", "SyncMasterAddresses", "CreateYbaBackup", "RestoreYbaBackup", "RestoreContinuousBackup", "EnableNodeAgentInUniverse", "KubernetesCheckVolumeExpansion", "KubernetesPostExpansionCheckVolume", "NodeCertReloadTask", "UpdateUniverseConfig", "CreateRootVolumes", "ReplaceRootVolume", "ChangeInstanceType", "PersistResizeNode", "PersistSystemdUpgrade", "UpdateNodeDetails", "AddAuthorizedKey", "AnsibleClusterServerCtl", "AnsibleConfigureServers", "AnsibleDestroyServer", "PauseServer", "ResumeServer", "AnsibleSetupServer", "AnsibleCreateServer", "PrecheckNode", "PrecheckNodeDetached", "AnsibleUpdateNodeInfo", "BulkImport", "ChangeMasterConfig", "ChangeAdminPassword", "DropTable", "CreateTable", "DeleteNode", "DeleteBackup", "DeleteBackupYb", "UpdateNodeProcess", "DeleteTableFromUniverse", "DeleteTablesFromUniverse", "DeleteKeyspace", "LoadBalancerStateChange", "ModifyBlackList", "CheckUnderReplicatedTablets", "CheckFollowerLag", "CheckNodeSafeToDelete", "ManipulateDnsRecordTask", "RemoveUniverseEntry", "SetFlagInMemory", "SetNodeState", "SwamperTargetsFileUpdate", "UniverseUpdateSucceeded", "UpdateAndPersistGFlags", "UpdateAndPersistKubernetesOverrides", "HandleKubernetesNamespacedServices", "UpdatePlacementInfo", "UpdateSoftwareVersion", "UpdateUniverseYbcDetails", "YBCBackupSucceeded", "UpdateUniverseYbcGflagsDetails", "VerifyNodeSSHAccess", "WaitForDataMove", "WaitForLeaderBlacklistCompletion", "WaitForFollowerLag", "WaitForLoadBalance", "WaitForMasterLeader", "WaitForServer", "WaitForYbcServer", "WaitForTServerHeartBeats", "DeleteClusterFromUniverse", "InstanceActions", "WaitForServerReady", "WaitForClockSync", "WaitForDuration", "RunExternalScript", "RemoveAuthorizedKey", "UpdateUniverseAccessKey", "ManageLoadBalancerGroup", "BootstrapProducer", "CheckBootstrapRequired", "DeleteBootstrapIds", "DeleteReplication", "DeleteXClusterConfigEntry", 
"DeleteXClusterTableConfigEntry", "DeleteDrConfigEntry", "WaitForReplicationDrain", "ResetXClusterConfigEntry", "SetReplicationPaused", "ChangeXClusterRole", "SetDrStates", "UpdateDrConfigParams", "XClusterAddNamespaceToOutboundReplicationGroup", "AddNamespaceToXClusterReplication", "XClusterRemoveNamespaceFromTargetUniverse", "XClusterRemoveNamespaceFromOutboundReplication", "SetRestoreTime", "XClusterConfigSetup", "XClusterConfigSetStatus", "XClusterConfigSetStatusForTables", "XClusterConfigSetStatusForNamespaces", "XClusterConfigModifyTables", "XClusterConfigRename", "XClusterConfigSync", "XClusterConfigUpdateMasterAddresses", "XClusterInfoPersist", "ReplicateNamespaces", "CheckXUniverseAutoFlags", "PromoteSecondaryConfigToMainConfig", "DeleteRemnantStreams", "CreateOutboundReplicationGroup", "XClusterDbReplicationSetup", "DeleteReplicationOnSource", "DeleteXClusterBackupRestoreEntries", "SetRestoreState", "CloudAccessKeyCleanup", "CloudAccessKeySetup", "CloudInitializer", "CloudProviderCleanup", "CloudRegionCleanup", "CloudRegionSetup", "CloudSetup", "BackupTable", "BackupTableYb", "BackupTableYbc", "BackupUniverseKeys", "RestoreBackupYb", "RestoreBackupYbc", "RestoreUniverseKeys", "RestoreUniverseKeysYb", "RestoreUniverseKeysYbc", "SetBackupHiddenState", "SetRestoreHiddenState", "RestorePreflightValidate", "BackupPreflightValidate", "WaitForLeadersOnPreferredOnly", "EnableEncryptionAtRest", "DisableEncryptionAtRest", "DestroyEncryptionAtRest", "KubernetesCommandExecutor", "KubernetesWaitForPod", "KubernetesCheckNumPod", "SetActiveUniverseKeys", "WaitForEncryptionKeyInMemory", "UnivSetCertificate", "CreateAlertDefinitions", "ManageAlertDefinitions", "UniverseSetTlsParams", "UniverseUpdateRootCert", "ResetUniverseVersion", "DeleteCertificate", "SetNodeStatus", "CheckMasterLeader", "CheckMasters", "CheckTServers", "WaitForTServerHBs", "CreatePrometheusSwamperConfig", "PreflightNodeCheck", "RunYsqlUpgrade", "PromoteAutoFlags", "RollbackAutoFlags", 
"StoreAutoFlagConfigVersion", "CheckUpgrade", "CheckCertificateConfig", "CheckMemory", "CheckLocale", "CheckGlibc", "CheckSoftwareVersion", "UpdateMountedDisks", "TransferXClusterCerts", "CreateTableSpaces", "ManageOtelCollector", "UpdateAndPersistAuditLoggingConfig", "MarkUniverseForHealthScriptReUpload", "RebootServer", "HardRebootServer", "RunHooks", "UpdateUniverseTags", "UpgradeYbc", "InstallYbcSoftwareOnK8s", "InstanceExistCheck", "DeleteRootVolumes", "InstallingThirdPartySoftware", "InstallNodeAgent", "WaitForNodeAgent", "CloudImageBundleSetup", "UpdateClusterUserIntent", "UpdateClusterAPIDetails", "UpdateUniverseState", "UpdateUniverseCommunicationPorts", "UpdateUniverseIntent", "UpdateConsistencyCheck", "FreezeUniverse", "QueryLdapServer", "DbLdapSync", "CheckForClusterServers", "CheckLeaderlessTablets", "CheckNodesAreSafeToTakeDown", "ValidateNodeDiskSize", "CheckNodeReachable", "WaitStartingFromTime", "RemoveNodeAgent", "UpdateUniverseFields", "RunNodeCommand" ], "type" : "string" }, "updatingTaskUUID" : { diff --git a/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/upgrade/ConfigureDBApisTest.java b/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/upgrade/ConfigureDBApisTest.java index 26a2dacc4751..a3ac112d5644 100644 --- a/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/upgrade/ConfigureDBApisTest.java +++ b/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/upgrade/ConfigureDBApisTest.java @@ -75,6 +75,7 @@ public void testDisableDbApis() { assertEquals(Success, taskInfo.getTaskState()); initMockUpgrade() .precheckTasks(getPrecheckTasks(true)) + .addTasks(TaskType.DropTable, TaskType.DropTable) .addTasks(TaskType.ChangeAdminPassword) .addTasks(TaskType.ChangeAdminPassword) .upgradeRound(UpgradeTaskParams.UpgradeOption.ROLLING_UPGRADE) From d6bbf59db8b502f790fc160ca9eef72cf5791b7b Mon Sep 17 00:00:00 2001 From: Eric Sheng Date: Mon, 16 Sep 2024 16:03:11 -0700 Subject: [PATCH 47/75] [#23890] docdb: Add filtering for 
bootstrap intent iterators based on min_replay_txn_start_ht Summary: This diff adds filtering for intent SST files during the transaction loading step of tablet bootstrap, using the newly introduced `min_replay_txn_start_ht`. When CDC is enabled, we persist intent SST files longer than they are otherwise needed, until CDC has streamed all transactions in the SST file and moved the retention barrier far enough ahead. This can lead to a large buildup of intent SST files which are not actually needed at bootstrap time. 8b23a4eef1a835bd144ab134f31b5e75189801fd / D35639 added changes to save `min_running_ht` periodically, and add intent SST file filtering during bootstrap time based on periodically saved values of `min_running_ht`. This can lead to data loss, if there is a transaction T such that the following is true: - T has been applied (APPLIED written to WALs) - T has intents that have been flushed to disk (this is rare but possible when CDC is disabled since in the ideal non-CDC case we never flush intents) - Changes made by T on regulardb have not been flushed - The metadata record for T is on an intent SST file whose max HT is less than min_running_ht after T apply (i.e. intentsdb flush happened between T writes and apply) - Tablet bootstrap state has been saved after T has committed These conditions will result in a `min_running_ht > T.start_time` being written to disk, and loaded during tablet bootstrap. Since regulardb changes have not been flushed, WAL replay will start from a point that includes T. However, transaction loader will not load T, because its metadata record has been excluded due to the SST file filter. This results in changes made by T being dropped, even though it has successfully committed. This change introduces a new `min_replay_txn_start_ht` and changes the intent SST file filter to be based off of periodically saved values of this new `min_replay_txn_start_ht`. 
`min_replay_txn_start_ht` is defined as the minimum of: - `min_running_ht` - `start_ht` of any transaction which may be read during WAL replay WAL replay begins at `bootstrap_start_op_id = min(intentsdb flushed_op_id, rocksdb flushed_op_id, retryable requests last_flushed_op_id)`. We calculate `min_replay_txn_start_ht` by maintaining a set of `(applied_op_id, start_ht)` for recently applied transactions. Transactions are added into this set when they are applied and cleaned from memory (removed from `transactions_`) and are removed when `bootstrap_start_op_id` is increased past `applied_op_id`. `min_replay_txn_start_ht` is then the minimum of `start_ht` of this set and `min_running_ht`. Since `replay_start_op_id` is only updated after flushes to disk, this ensures that any transaction whose metadata record is filtered out by the intent SST file filter will not be incorrectly loaded during WAL replay, since such a transaction would have `apply_op_id < replay_start_op_id` (the `replay_start_op_id` calculated at bootstrap time), so none of its records are read by WAL replay. **Upgrade/Rollback safety:** The `min_running_ht` field in `TabletBootstrapStatePB` was retired and a new `min_replay_txn_start_ht` field was added. There are no autoflags added because `min_replay_txn_start_ht` is only used for an optimization (intent SST file filtering) so the lack of its presence post-upgrade does not change correctness, and its presence post-rollback is simply ignored. `min_running_ht` was only used for an incorrect implementation of the optimization which was off by default, so the lack of its presence post-rollback does not harm correctness (and actually improves it if optimization was turned on) and its presence after upgrade is ignored. A different field was used for this change to ensure that values of `min_running_ht` set before upgrade are not used, since it is unsafe to use it. 
Jira: DB-12794 Test Plan: Added test case to reproduce the data loss scenario when filter was using `min_running_ht`: ``` ./yb_build.sh --cxx_test pgwrapper_pg_mini-test --gtest_filter PgMiniTestSingleNode.TestBootstrapOnAppliedTransactionWithIntents ``` Also confirmed that CDC stress tests stop failing after these changes. Reviewers: sergei, qhu Reviewed By: sergei Subscribers: rthallam, ybase, yql Differential Revision: https://phorge.dev.yugabyte.com/D37792 --- src/yb/common/opid.h | 15 ++ src/yb/consensus/consensus.proto | 5 +- src/yb/tablet/tablet.cc | 47 +++- src/yb/tablet/tablet.h | 1 + src/yb/tablet/tablet_bootstrap.cc | 14 +- .../tablet/tablet_bootstrap_state_flusher.cc | 2 +- .../tablet/tablet_bootstrap_state_flusher.h | 3 + .../tablet/tablet_bootstrap_state_manager.cc | 49 +++- .../tablet/tablet_bootstrap_state_manager.h | 11 +- src/yb/tablet/tablet_peer.cc | 19 +- src/yb/tablet/tablet_peer.h | 4 +- src/yb/tablet/transaction_loader.cc | 6 +- src/yb/tablet/transaction_loader.h | 2 +- src/yb/tablet/transaction_participant.cc | 220 ++++++++++++++++-- src/yb/tablet/transaction_participant.h | 13 +- .../tablet/transaction_participant_context.h | 2 + src/yb/util/algorithm_util.h | 12 + src/yb/yql/pgwrapper/pg_mini-test.cc | 52 +++++ 18 files changed, 409 insertions(+), 68 deletions(-) diff --git a/src/yb/common/opid.h b/src/yb/common/opid.h index e2513f09ed30..9b375c91795c 100644 --- a/src/yb/common/opid.h +++ b/src/yb/common/opid.h @@ -84,6 +84,9 @@ struct OpId { std::string ToString() const; + static OpId MinValid(const OpId& lhs, const OpId& rhs); + static OpId MaxValid(const OpId& lhs, const OpId& rhs); + // Parse OpId from TERM.INDEX string. 
static Result FromString(Slice input); }; @@ -112,6 +115,18 @@ inline bool operator>=(const OpId& lhs, const OpId& rhs) { return !(lhs < rhs); } +inline OpId OpId::MinValid(const OpId& lhs, const OpId& rhs) { + if (!lhs.valid()) return rhs; + if (!rhs.valid()) return lhs; + return std::min(lhs, rhs); +} + +inline OpId OpId::MaxValid(const OpId& lhs, const OpId& rhs) { + if (!lhs.valid()) return rhs; + if (!rhs.valid()) return lhs; + return std::max(lhs, rhs); +} + std::ostream& operator<<(std::ostream& out, const OpId& op_id); size_t hash_value(const OpId& op_id) noexcept; diff --git a/src/yb/consensus/consensus.proto b/src/yb/consensus/consensus.proto index 5db17252fda8..ceb605627345 100644 --- a/src/yb/consensus/consensus.proto +++ b/src/yb/consensus/consensus.proto @@ -613,7 +613,10 @@ message ClientReplicatedRetryableRequestRangesPB { message TabletBootstrapStatePB { optional OpIdPB last_op_id = 1; repeated ClientReplicatedRetryableRequestRangesPB client_requests = 2; - optional fixed64 min_running_ht = 3; + + reserved 3; + + optional fixed64 min_replay_txn_start_ht = 4; } // A Raft implementation. 
diff --git a/src/yb/tablet/tablet.cc b/src/yb/tablet/tablet.cc index cb49192e65e7..51ecb5716666 100644 --- a/src/yb/tablet/tablet.cc +++ b/src/yb/tablet/tablet.cc @@ -491,11 +491,36 @@ Result CheckSafeTime(HybridTime time, HybridTime min_allowed) { } // namespace -class Tablet::RegularRocksDbListener : public rocksdb::EventListener { +class Tablet::RocksDbListener : public rocksdb::EventListener { public: - RegularRocksDbListener(Tablet* tablet, const std::string& log_prefix) - : tablet_(*CHECK_NOTNULL(tablet)), - log_prefix_(log_prefix) {} + RocksDbListener(Tablet& tablet, const std::string& log_prefix) + : tablet_(tablet), log_prefix_(log_prefix) {} + + void OnFlushCompleted(rocksdb::DB*, const rocksdb::FlushJobInfo&) override { + if (auto* participant = tablet_.transaction_participant()) { + VLOG_WITH_PREFIX_AND_FUNC(2) + << "RocksDB flush completed, triggering cleanup of recently applied transactions"; + auto status = participant->ProcessRecentlyAppliedTransactions(); + if (!status.ok() && !tablet_.shutdown_requested_.load(std::memory_order_acquire)) { + LOG_WITH_PREFIX_AND_FUNC(DFATAL) + << "Failed to clean up recently applied transactions: " << status; + } + } + } + + protected: + const std::string& LogPrefix() const { + return log_prefix_; + } + + Tablet& tablet_; + const std::string log_prefix_; +}; + +class Tablet::RegularRocksDbListener : public Tablet::RocksDbListener { + public: + RegularRocksDbListener(Tablet& tablet, const std::string& log_prefix) + : RocksDbListener(tablet, log_prefix) {} void OnCompactionCompleted(rocksdb::DB* db, const rocksdb::CompactionJobInfo& ci) override { auto& metadata = *CHECK_NOTNULL(tablet_.metadata()); @@ -528,7 +553,7 @@ class Tablet::RegularRocksDbListener : public rocksdb::EventListener { { auto scoped_read_operation = tablet_.CreateScopedRWOperationNotBlockingRocksDbShutdownStart(); if (!scoped_read_operation.ok()) { - VLOG_WITH_FUNC(4) << "Skip"; + VLOG_WITH_PREFIX_AND_FUNC(4) << "Skip"; return; } @@ -552,7 +577,7 
@@ class Tablet::RegularRocksDbListener : public rocksdb::EventListener { if(!tablet_.metadata()->colocated()) { auto schema_version = tablet_.get_min_xcluster_schema_version_(primary_table_id, kColocationIdNotSet); - VLOG_WITH_FUNC(4) << + VLOG_WITH_PREFIX_AND_FUNC(4) << Format("MinNonXClusterSchemaVersion, MinXClusterSchemaVersion for $0:$1,$2", primary_table_id, min_schema_versions[Uuid::Nil()], schema_version); if (schema_version < min_schema_versions[Uuid::Nil()]) { @@ -566,7 +591,7 @@ class Tablet::RegularRocksDbListener : public rocksdb::EventListener { ColocationId colocation_id = colocated_tables[table_id.ToHexString()]; auto xcluster_min_schema_version = tablet_.get_min_xcluster_schema_version_(primary_table_id, colocation_id); - VLOG_WITH_FUNC(4) << + VLOG_WITH_PREFIX_AND_FUNC(4) << Format("MinNonXClusterSchemaVersion, MinXClusterSchemaVersion for $0,$1:$2,$3", primary_table_id, colocation_id, min_schema_versions[table_id], xcluster_min_schema_version); @@ -595,9 +620,6 @@ class Tablet::RegularRocksDbListener : public rocksdb::EventListener { smallest.MakeExternalSchemaVersionsAtMost(table_id_to_min_schema_version); } } - - Tablet& tablet_; - const std::string log_prefix_; }; Tablet::Tablet(const TabletInitData& data) @@ -972,7 +994,7 @@ Status Tablet::OpenKeyValueTablet() { rocksdb::Options regular_rocksdb_options(rocksdb_options); regular_rocksdb_options.listeners.push_back( - std::make_shared(this, regular_rocksdb_options.log_prefix)); + std::make_shared(*this, regular_rocksdb_options.log_prefix)); const string db_dir = metadata()->rocksdb_dir(); RETURN_NOT_OK(CreateTabletDirectories(db_dir, metadata()->fs_manager())); @@ -1020,6 +1042,9 @@ Status Tablet::OpenKeyValueTablet() { } intents_rocksdb_options.statistics = intentsdb_statistics_; + intents_rocksdb_options.listeners.push_back( + std::make_shared(*this, intents_rocksdb_options.log_prefix)); + rocksdb::DB* intents_db = nullptr; RETURN_NOT_OK( rocksdb::DB::Open(intents_rocksdb_options, db_dir + 
kIntentsDBSuffix, &intents_db)); diff --git a/src/yb/tablet/tablet.h b/src/yb/tablet/tablet.h index 82d42335f3b8..65f4a23d80e7 100644 --- a/src/yb/tablet/tablet.h +++ b/src/yb/tablet/tablet.h @@ -978,6 +978,7 @@ class Tablet : public AbstractTablet, friend class ScopedReadOperation; friend class TabletComponent; + class RocksDbListener; class RegularRocksDbListener; FRIEND_TEST(TestTablet, TestGetLogRetentionSizeForIndex); diff --git a/src/yb/tablet/tablet_bootstrap.cc b/src/yb/tablet/tablet_bootstrap.cc index 7cb76d3094b7..ad0c7bcfa9a9 100644 --- a/src/yb/tablet/tablet_bootstrap.cc +++ b/src/yb/tablet/tablet_bootstrap.cc @@ -136,8 +136,8 @@ DEFINE_RUNTIME_bool(skip_flushed_entries_in_first_replayed_segment, true, "If applicable, only replay entries that are not flushed to RocksDB or necessary " "to bootstrap retryable requests in the first replayed wal segment."); -DEFINE_RUNTIME_bool(use_bootstrap_intent_ht_filter, false, - "Use min running hybrid time filter for bootstrap."); +DEFINE_RUNTIME_bool(use_bootstrap_intent_ht_filter, true, + "Use min replay txn start time filter for bootstrap."); DECLARE_int32(retryable_request_timeout_secs); @@ -506,7 +506,7 @@ class TabletBootstrap { } std::optional bootstrap_state_pb = std::nullopt; - HybridTime min_running_ht = HybridTime::kInvalid; + HybridTime min_replay_txn_start_ht = HybridTime::kInvalid; if (GetAtomicFlag(&FLAGS_enable_flush_retryable_requests) && data_.bootstrap_state_manager) { auto result = data_.bootstrap_state_manager->LoadFromDisk(); if (result.ok()) { @@ -514,14 +514,14 @@ class TabletBootstrap { if (GetAtomicFlag(&FLAGS_use_bootstrap_intent_ht_filter)) { const auto& bootstrap_state = data_.bootstrap_state_manager->bootstrap_state(); - min_running_ht = bootstrap_state.GetMinRunningHybridTime(); + min_replay_txn_start_ht = bootstrap_state.GetMinReplayTxnStartTime(); } } else if (!result.status().IsNotFound()) { return result.status(); } } - const bool has_blocks = 
VERIFY_RESULT(OpenTablet(min_running_ht)); + const bool has_blocks = VERIFY_RESULT(OpenTablet(min_replay_txn_start_ht)); if (data_.retryable_requests) { const auto retryable_request_timeout_secs = meta_->IsSysCatalog() @@ -623,7 +623,7 @@ class TabletBootstrap { } // Sets result to true if there was any data on disk for this tablet. - Result OpenTablet(HybridTime min_running_ht) { + Result OpenTablet(HybridTime min_replay_txn_start_ht) { CleanupSnapshots(); // Use operator new instead of make_shared for creating the shared_ptr. That way, we would have // the shared_ptr's control block hold a raw pointer to the Tablet object as opposed to the @@ -637,7 +637,7 @@ class TabletBootstrap { auto participant = tablet->transaction_participant(); if (participant) { - participant->SetMinRunningHybridTimeLowerBound(min_running_ht); + participant->SetMinReplayTxnStartTimeLowerBound(min_replay_txn_start_ht); } // Doing nothing for now except opening a tablet locally. diff --git a/src/yb/tablet/tablet_bootstrap_state_flusher.cc b/src/yb/tablet/tablet_bootstrap_state_flusher.cc index d739eb5d9742..969aff0dbe0e 100644 --- a/src/yb/tablet/tablet_bootstrap_state_flusher.cc +++ b/src/yb/tablet/tablet_bootstrap_state_flusher.cc @@ -125,7 +125,7 @@ Status TabletBootstrapStateFlusher::FlushBootstrapState(TabletBootstrapFlushStat SetIdleAndNotifyAll(); }); TEST_PAUSE_IF_FLAG(TEST_pause_before_flushing_bootstrap_state); - return bootstrap_state_manager_->SaveToDisk(*raft_consensus_); + return bootstrap_state_manager_->SaveToDisk(tablet_, *raft_consensus_); } Status TabletBootstrapStateFlusher::SubmitFlushBootstrapStateTask() { diff --git a/src/yb/tablet/tablet_bootstrap_state_flusher.h b/src/yb/tablet/tablet_bootstrap_state_flusher.h index 189c5cabb9f6..0bcaf7460d73 100644 --- a/src/yb/tablet/tablet_bootstrap_state_flusher.h +++ b/src/yb/tablet/tablet_bootstrap_state_flusher.h @@ -40,10 +40,12 @@ class TabletBootstrapStateFlusher : public: TabletBootstrapStateFlusher( const std::string& 
tablet_id, + TabletWeakPtr tablet, std::shared_ptr raft_consensus, std::shared_ptr bootstrap_state_manager, std::unique_ptr flush_bootstrap_state_pool_token) : tablet_id_(tablet_id), + tablet_(std::move(tablet)), raft_consensus_(raft_consensus), bootstrap_state_manager_(bootstrap_state_manager), flush_bootstrap_state_pool_token_(std::move(flush_bootstrap_state_pool_token)) {} @@ -77,6 +79,7 @@ class TabletBootstrapStateFlusher : mutable std::condition_variable flush_cond_; std::atomic flush_state_{TabletBootstrapFlushState::kFlushIdle}; TabletId tablet_id_; + TabletWeakPtr tablet_; std::shared_ptr raft_consensus_; std::shared_ptr bootstrap_state_manager_; std::unique_ptr flush_bootstrap_state_pool_token_; diff --git a/src/yb/tablet/tablet_bootstrap_state_manager.cc b/src/yb/tablet/tablet_bootstrap_state_manager.cc index ef0573e868a1..3f739223abc0 100644 --- a/src/yb/tablet/tablet_bootstrap_state_manager.cc +++ b/src/yb/tablet/tablet_bootstrap_state_manager.cc @@ -21,32 +21,36 @@ #include "yb/consensus/retryable_requests.h" #include "yb/consensus/opid_util.h" +#include "yb/tablet/tablet.h" +#include "yb/tablet/transaction_participant.h" + #include "yb/util/debug-util.h" #include "yb/util/env_util.h" namespace yb::tablet { TabletBootstrapState::TabletBootstrapState(const TabletBootstrapState& rhs): - min_running_ht_(rhs.min_running_ht_.load()) {} + min_replay_txn_start_ht_(rhs.min_replay_txn_start_ht_.load()) {} TabletBootstrapState::TabletBootstrapState(TabletBootstrapState&& rhs): - min_running_ht_(rhs.min_running_ht_.load()) {} + min_replay_txn_start_ht_(rhs.min_replay_txn_start_ht_.load()) {} void TabletBootstrapState::operator=(TabletBootstrapState&& rhs) { - min_running_ht_.store(rhs.min_running_ht_.load()); + min_replay_txn_start_ht_.store(rhs.min_replay_txn_start_ht_.load()); } void TabletBootstrapState::CopyFrom(const TabletBootstrapState& rhs) { - min_running_ht_.store(rhs.min_running_ht_.load()); + 
min_replay_txn_start_ht_.store(rhs.min_replay_txn_start_ht_.load()); } void TabletBootstrapState::ToPB(consensus::TabletBootstrapStatePB* pb) const { - pb->set_min_running_ht(min_running_ht_.load().ToUint64()); + pb->set_min_replay_txn_start_ht(min_replay_txn_start_ht_.load().ToUint64()); } void TabletBootstrapState::FromPB(const consensus::TabletBootstrapStatePB& pb) { - min_running_ht_.store( - pb.has_min_running_ht() ? HybridTime(pb.min_running_ht()) : HybridTime::kInvalid); + min_replay_txn_start_ht_.store( + pb.has_min_replay_txn_start_ht() ? HybridTime(pb.min_replay_txn_start_ht()) + : HybridTime::kInvalid); } TabletBootstrapStateManager::TabletBootstrapStateManager() { } @@ -71,15 +75,32 @@ Status TabletBootstrapStateManager::Init() { return Status::OK(); } -Status TabletBootstrapStateManager::SaveToDisk(consensus::RaftConsensus& raft_consensus) { +Status TabletBootstrapStateManager::SaveToDisk( + const TabletWeakPtr& tablet_ptr, consensus::RaftConsensus& raft_consensus) { auto retryable_requests = VERIFY_RESULT(raft_consensus.TakeSnapshotOfRetryableRequests()); if (!retryable_requests) { LOG(INFO) << "Nothing to save"; return Status::OK(); } + auto max_replicated_op_id = retryable_requests->GetMaxReplicatedOpId(); + TabletBootstrapState bootstrap_state(bootstrap_state_); + // Set min replay txn start time to what it will be after this flush succeeds - this is safe + // because if the flush succeeds, replay start op id will be calculated from the new value. 
+ auto tablet = tablet_ptr.lock(); + TransactionParticipant* participant = nullptr; + if (tablet) { + participant = tablet->transaction_participant(); + if (participant) { + auto start_ht = VERIFY_RESULT(participant->SimulateProcessRecentlyAppliedTransactions( + max_replicated_op_id)); + VLOG(1) << "Using min_replay_txn_start_ht = " << start_ht; + bootstrap_state.SetMinReplayTxnStartTime(start_ht); + } + } + consensus::TabletBootstrapStatePB pb; retryable_requests->ToPB(&pb); bootstrap_state.ToPB(&pb); @@ -101,8 +122,16 @@ Status TabletBootstrapStateManager::SaveToDisk(consensus::RaftConsensus& raft_co has_file_on_disk_ = true; RETURN_NOT_OK(env->SyncDir(dir_)); - auto max_replicated_op_id = retryable_requests->GetMaxReplicatedOpId(); - return raft_consensus.SetLastFlushedOpIdInRetryableRequests(max_replicated_op_id); + RETURN_NOT_OK(raft_consensus.SetLastFlushedOpIdInRetryableRequests(max_replicated_op_id)); + + if (participant) { + VLOG(1) + << "Bootstrap state saved to disk, triggering cleanup of recently applied transactions"; + participant->SetRetryableRequestsFlushedOpId(max_replicated_op_id); + return participant->ProcessRecentlyAppliedTransactions(); + } + + return Status::OK(); } Result TabletBootstrapStateManager::LoadFromDisk() { diff --git a/src/yb/tablet/tablet_bootstrap_state_manager.h b/src/yb/tablet/tablet_bootstrap_state_manager.h index dd7c60dd2be6..17f011c3d9cf 100644 --- a/src/yb/tablet/tablet_bootstrap_state_manager.h +++ b/src/yb/tablet/tablet_bootstrap_state_manager.h @@ -42,14 +42,17 @@ class TabletBootstrapState { void CopyFrom(const TabletBootstrapState& rhs); - void SetMinRunningHybridTime(HybridTime min_running_ht) { min_running_ht_.store(min_running_ht); } - HybridTime GetMinRunningHybridTime() const { return min_running_ht_.load(); } + void SetMinReplayTxnStartTime(HybridTime min_replay_txn_start_ht) { + min_replay_txn_start_ht_.store(min_replay_txn_start_ht); + } + + HybridTime GetMinReplayTxnStartTime() const { return 
min_replay_txn_start_ht_.load(); } void ToPB(consensus::TabletBootstrapStatePB* pb) const; void FromPB(const consensus::TabletBootstrapStatePB& pb); private: - std::atomic min_running_ht_{HybridTime::kInvalid}; + std::atomic min_replay_txn_start_ht_{HybridTime::kInvalid}; }; class TabletBootstrapStateManager { @@ -74,7 +77,7 @@ class TabletBootstrapStateManager { } // Flush the pb as the latest version. - Status SaveToDisk(consensus::RaftConsensus& raft_consensus); + Status SaveToDisk(const TabletWeakPtr& tablet_ptr, consensus::RaftConsensus& raft_consensus); // Load the latest version from disk if any. Result LoadFromDisk(); diff --git a/src/yb/tablet/tablet_peer.cc b/src/yb/tablet/tablet_peer.cc index 28942c1baa18..103b638419c7 100644 --- a/src/yb/tablet/tablet_peer.cc +++ b/src/yb/tablet/tablet_peer.cc @@ -320,7 +320,7 @@ Status TabletPeer::InitTabletPeer( auto flush_bootstrap_state_pool_token = flush_bootstrap_state_pool ? flush_bootstrap_state_pool->NewToken(ThreadPool::ExecutionMode::SERIAL) : nullptr; bootstrap_state_flusher_ = std::make_shared( - tablet_id_, consensus_, bootstrap_state_manager_, + tablet_id_, tablet_weak_, consensus_, bootstrap_state_manager_, std::move(flush_bootstrap_state_pool_token)); tablet_->SetHybridTimeLeaseProvider(std::bind(&TabletPeer::HybridTimeLease, this, _1, _2)); @@ -331,8 +331,8 @@ Status TabletPeer::InitTabletPeer( auto txn_participant = tablet_->transaction_participant(); if (txn_participant) { - txn_participant->SetMinRunningHybridTimeUpdateCallback( - std::bind_front(&TabletPeer::MinRunningHybridTimeUpdated, this)); + txn_participant->SetMinReplayTxnStartTimeUpdateCallback( + std::bind_front(&TabletPeer::MinReplayTxnStartTimeUpdated, this)); } // "Publish" the tablet object right before releasing the lock. 
@@ -942,6 +942,11 @@ void TabletPeer::GetInFlightOperations(Operation::TraceType trace_type, } } +Result TabletPeer::MaxPersistentOpId() const { + auto flush_op_ids = VERIFY_RESULT(tablet_->MaxPersistentOpId()); + return OpId::MinValid(flush_op_ids.intents, flush_op_ids.regular); +} + Result TabletPeer::GetEarliestNeededLogIndex(std::string* details) const { if (PREDICT_FALSE(!log_)) { auto status = STATUS(Uninitialized, "Log not ready (tablet peer not yet initialized?)"); @@ -1822,10 +1827,10 @@ TabletBootstrapFlushState TabletPeer::TEST_TabletBootstrapStateFlusherState() co : TabletBootstrapFlushState::kFlushIdle; } -void TabletPeer::MinRunningHybridTimeUpdated(HybridTime min_running_ht) { - if (min_running_ht && min_running_ht != HybridTime::kMax) { - VLOG_WITH_PREFIX(2) << "Min running hybrid time updated: " << min_running_ht; - bootstrap_state_manager_->bootstrap_state().SetMinRunningHybridTime(min_running_ht); +void TabletPeer::MinReplayTxnStartTimeUpdated(HybridTime start_ht) { + if (start_ht && start_ht != HybridTime::kMax) { + VLOG_WITH_PREFIX(2) << "min_replay_txn_start_ht updated: " << start_ht; + bootstrap_state_manager_->bootstrap_state().SetMinReplayTxnStartTime(start_ht); } } diff --git a/src/yb/tablet/tablet_peer.h b/src/yb/tablet/tablet_peer.h index 2c6fbb1e54db..50abce111683 100644 --- a/src/yb/tablet/tablet_peer.h +++ b/src/yb/tablet/tablet_peer.h @@ -320,6 +320,8 @@ class TabletPeer : public std::enable_shared_from_this, // to it. Result GetEarliestNeededLogIndex(std::string* details = nullptr) const; + Result MaxPersistentOpId() const override; + // Returns the the minimum log index for transaction tables and latest log index for other tables. // Returns the bootstrap_time which is safe_time higher than the time of the returned OpId. 
// If FLAGS_abort_active_txns_during_cdc_bootstrap is set then all active transactions are @@ -600,7 +602,7 @@ class TabletPeer : public std::enable_shared_from_this, bool FlushBootstrapStateEnabled() const; - void MinRunningHybridTimeUpdated(HybridTime min_running_ht); + void MinReplayTxnStartTimeUpdated(HybridTime start_ht); MetricRegistry* metric_registry_; diff --git a/src/yb/tablet/transaction_loader.cc b/src/yb/tablet/transaction_loader.cc index a7be08d4865c..cd2c8295040e 100644 --- a/src/yb/tablet/transaction_loader.cc +++ b/src/yb/tablet/transaction_loader.cc @@ -77,11 +77,11 @@ class TransactionLoader::Executor { if (!scoped_pending_operation_.ok()) { return false; } - auto min_running_ht = context().MinRunningHybridTime(); - VLOG_WITH_PREFIX(1) << "TransactionLoader min_running_ht: " << min_running_ht; + auto min_replay_txn_start_ht = context().MinReplayTxnStartTime(); + VLOG_WITH_PREFIX(1) << "TransactionLoader min_replay_txn_start_ht: " << min_replay_txn_start_ht; regular_iterator_ = CreateFullScanIterator(db.regular, nullptr /* filter */); intents_iterator_ = CreateFullScanIterator(db.intents, - docdb::CreateIntentHybridTimeFileFilter(min_running_ht)); + docdb::CreateIntentHybridTimeFileFilter(min_replay_txn_start_ht)); loader_.state_.store(TransactionLoaderState::kLoading, std::memory_order_release); CHECK_OK(yb::Thread::Create( "transaction_loader", "loader", &Executor::Execute, this, &loader_.load_thread_)) diff --git a/src/yb/tablet/transaction_loader.h b/src/yb/tablet/transaction_loader.h index e317cbca9c8e..3ea8d00d750e 100644 --- a/src/yb/tablet/transaction_loader.h +++ b/src/yb/tablet/transaction_loader.h @@ -58,7 +58,7 @@ class TransactionLoaderContext { OneWayBitmap&& replicated_batches, const ApplyStateWithCommitHt* pending_apply) = 0; virtual void LoadFinished(Status load_status) = 0; - virtual HybridTime MinRunningHybridTime() = 0; + virtual HybridTime MinReplayTxnStartTime() = 0; }; YB_DEFINE_ENUM(TransactionLoaderState, 
(kNotStarted)(kLoading)(kCompleted)(kFailed)); diff --git a/src/yb/tablet/transaction_participant.cc b/src/yb/tablet/transaction_participant.cc index 78ba97bcf340..d961f600f369 100644 --- a/src/yb/tablet/transaction_participant.cc +++ b/src/yb/tablet/transaction_participant.cc @@ -20,6 +20,7 @@ #include #include +#include #include #include "yb/client/transaction_rpc.h" @@ -49,6 +50,7 @@ #include "yb/tserver/tserver_service.pb.h" +#include "yb/util/algorithm_util.h" #include "yb/util/async_util.h" #include "yb/util/callsite_profiling.h" #include "yb/util/countdown_latch.h" @@ -113,6 +115,9 @@ DEFINE_RUNTIME_bool(cdc_immediate_transaction_cleanup, true, DEFINE_test_flag(int32, stopactivetxns_sleep_in_abort_cb_ms, 0, "Delays the abort callback in StopActiveTxns to repro GitHub #23399."); +DEFINE_test_flag(bool, no_schedule_remove_intents, false, + "Don't schedule remove intents when transaction is cleaned from memory."); + DECLARE_int64(transaction_abort_check_timeout_ms); DECLARE_int64(cdc_intent_retention_ms); @@ -130,6 +135,10 @@ METRIC_DEFINE_simple_gauge_uint64( tablet, aborted_transactions_pending_cleanup, "Total number of aborted transactions running in participant", yb::MetricUnit::kTransactions); +METRIC_DEFINE_simple_gauge_uint64( + tablet, wal_replayable_applied_transactions, + "Total number of recently applied transactions that may be found during WAL replay", + yb::MetricUnit::kTransactions); METRIC_DEFINE_event_stats(tablet, conflict_resolution_latency, "Conflict Resolution Latency", yb::MetricUnit::kMicroseconds, "Microseconds spent on conflict resolution across all " @@ -197,6 +206,8 @@ class TransactionParticipant::Impl metric_transaction_not_found_ = METRIC_transaction_not_found.Instantiate(entity); metric_aborted_transactions_pending_cleanup_ = METRIC_aborted_transactions_pending_cleanup.Instantiate(entity, 0); + metric_wal_replayable_applied_transactions_ = + METRIC_wal_replayable_applied_transactions.Instantiate(entity, 0); 
metric_conflict_resolution_latency_ = METRIC_conflict_resolution_latency.Instantiate(entity); metric_conflict_resolution_num_keys_scanned_ = @@ -624,7 +635,9 @@ class TransactionParticipant::Impl OpId op_id = (**it).GetApplyOpId(); if (op_id <= checkpoint_op_id) { - (**it).ScheduleRemoveIntents(*it, front.reason); + if (PREDICT_TRUE(!GetAtomicFlag(&FLAGS_TEST_no_schedule_remove_intents))) { + (**it).ScheduleRemoveIntents(*it, front.reason); + } } else { if (!GetAtomicFlag(&FLAGS_cdc_write_post_apply_metadata) || !GetAtomicFlag(&FLAGS_cdc_immediate_transaction_cleanup)) { @@ -1075,18 +1088,22 @@ class TransactionParticipant::Impl return &participant_context_; } - void SetMinRunningHybridTimeLowerBound(HybridTime lower_bound) { - if (lower_bound == HybridTime::kMax || lower_bound == HybridTime::kInvalid) { + void SetMinReplayTxnStartTimeLowerBound(HybridTime start_ht) { + if (start_ht == HybridTime::kMax || start_ht == HybridTime::kInvalid) { return; } - HybridTime current_ht = min_running_ht_.load(std::memory_order_acquire); - while ((!current_ht || current_ht < lower_bound) - && !min_running_ht_.compare_exchange_weak(current_ht, lower_bound)) {} - VLOG_WITH_PREFIX(1) << "Updated min running hybrid time to at least " << lower_bound + HybridTime current_ht = min_replay_txn_start_ht_.load(std::memory_order_acquire); + while ((!current_ht || current_ht < start_ht) + && !min_replay_txn_start_ht_.compare_exchange_weak(current_ht, start_ht)) {} + VLOG_WITH_PREFIX(1) << "Set min replay txn start time to at least " << start_ht << ", was " << current_ht; } - HybridTime MinRunningHybridTime() override { + HybridTime MinReplayTxnStartTime() override { + return min_replay_txn_start_ht_.load(std::memory_order_acquire); + } + + HybridTime MinRunningHybridTime() { auto result = min_running_ht_.load(std::memory_order_acquire); if (result == HybridTime::kMax || result == HybridTime::kInvalid || !transactions_loaded_.load()) { @@ -1252,9 +1269,9 @@ class TransactionParticipant::Impl 
return transactions_.size(); } - void SetMinRunningHybridTimeUpdateCallback(std::function callback) { + void SetMinReplayTxnStartTimeUpdateCallback(std::function callback) { std::lock_guard lock(mutex_); - min_running_ht_callback_ = std::move(callback); + min_replay_txn_start_ht_callback_ = std::move(callback); } OneWayBitmap TEST_TransactionReplicatedBatches(const TransactionId& id) { @@ -1406,9 +1423,28 @@ class TransactionParticipant::Impl metric_conflict_resolution_latency_->Increment(latency.ToMilliseconds()); } + Result SimulateProcessRecentlyAppliedTransactions( + const OpId& retryable_requests_flushed_op_id) EXCLUDES(mutex_) { + std::lock_guard lock(mutex_); + return DoProcessRecentlyAppliedTransactions( + retryable_requests_flushed_op_id, false /* persist */); + } + + void SetRetryableRequestsFlushedOpId(const OpId& flushed_op_id) EXCLUDES(mutex_) { + std::lock_guard lock(mutex_); + retryable_requests_flushed_op_id_ = flushed_op_id; + } + + Status ProcessRecentlyAppliedTransactions() EXCLUDES(mutex_) { + std::lock_guard lock(mutex_); + return ResultToStatus(DoProcessRecentlyAppliedTransactions( + retryable_requests_flushed_op_id_, true /* persist */)); + } + private: class AbortCheckTimeTag; class StartTimeTag; + class ApplyOpIdTag; typedef boost::multi_index_container > Transactions; + struct AppliedTransactionState { + OpId apply_op_id; + HybridTime start_ht; + }; + + using RecentlyAppliedTransactions = boost::multi_index_container, + boost::multi_index::member < + AppliedTransactionState, OpId, &AppliedTransactionState::apply_op_id> + >, + boost::multi_index::ordered_non_unique < + boost::multi_index::tag, + boost::multi_index::member < + AppliedTransactionState, HybridTime, &AppliedTransactionState::start_ht> + > + > + >; + void LoadFinished(Status load_status) EXCLUDES(status_resolvers_mutex_) override { // The start_latch will be hit either from a CountDown from Start, or from Shutdown, so make // sure that at the end of Load, we unblock shutdown. 
@@ -1509,10 +1565,8 @@ class TransactionParticipant::Impl } void SetMinRunningHybridTime(HybridTime min_running_ht) REQUIRES(mutex_) { - min_running_ht_.store(min_running_ht, std::memory_order_release); - if (min_running_ht_callback_) { - min_running_ht_callback_(min_running_ht); - } + min_running_ht_.store(min_running_ht); + UpdateMinReplayTxnStartTimeIfNeeded(); } void TransactionsModifiedUnlocked(MinRunningNotifier* min_running_notifier) REQUIRES(mutex_) { @@ -1632,7 +1686,9 @@ class TransactionParticipant::Impl bool remove_transaction = true; if (op_id < checkpoint_op_id) { - (**it).ScheduleRemoveIntents(*it, reason); + if (PREDICT_TRUE(!GetAtomicFlag(&FLAGS_TEST_no_schedule_remove_intents))) { + (**it).ScheduleRemoveIntents(*it, reason); + } } else { if (!GetAtomicFlag(&FLAGS_cdc_write_post_apply_metadata) || !GetAtomicFlag(&FLAGS_cdc_immediate_transaction_cleanup)) { @@ -1811,6 +1867,7 @@ class TransactionParticipant::Impl recently_removed_transactions_cleanup_queue_.push_back({transaction.id(), now + 15s}); LOG_IF_WITH_PREFIX(DFATAL, !recently_removed_transactions_.insert(transaction.id()).second) << "Transaction removed twice: " << transaction.id(); + AddRecentlyAppliedTransaction(transaction.start_ht(), transaction.GetApplyOpId()); transactions_.erase(it); mem_tracker_->Release(kRunningTransactionSize); TransactionsModifiedUnlocked(min_running_notifier); @@ -2087,6 +2144,100 @@ class TransactionParticipant::Impl participant_context_.StrandEnqueue(write_metadata_task.get()); } + void AddRecentlyAppliedTransaction(HybridTime start_ht, const OpId& apply_op_id) + REQUIRES(mutex_) { + // We only care about the min start_ht, while cleaning out all entries with apply_op_id less + // than progressively higher boundaries, so entries with apply_op_id lower and higher start_ht + // than the entry with the lowest start_ht are irrelevant. 
Likewise, if apply_op_id is higher + // and start_ht is lower than the lowest start_ht entry, the lowest start_ht entry is now + // irrelevant and can be cleaned up. + + int64_t cleaned = 0; + if (!recently_applied_.empty()) { + auto& index = recently_applied_.get(); + + auto itr = index.begin(); + if (start_ht >= itr->start_ht && apply_op_id <= itr->apply_op_id) { + VLOG_WITH_PREFIX(2) + << "Not adding recently applied transaction: " + << "start_ht=" << start_ht << " (min=" << itr->start_ht << "), " + << "apply_op_id=" << apply_op_id << " (min=" << itr->apply_op_id << ")"; + return; + } + + cleaned = EraseElementsUntil( + index, + [start_ht, &apply_op_id](const AppliedTransactionState& state) { + return start_ht > state.start_ht || apply_op_id < state.apply_op_id; + }); + } + + VLOG_WITH_PREFIX(2) + << "Adding recently applied transaction: " + << "start_ht=" << start_ht << " apply_op_id=" << apply_op_id + << " (cleaned " << cleaned << ")"; + recently_applied_.insert(AppliedTransactionState{apply_op_id, start_ht}); + metric_wal_replayable_applied_transactions_->IncrementBy(1 - static_cast(cleaned)); + UpdateMinReplayTxnStartTimeIfNeeded(); + } + + Result DoProcessRecentlyAppliedTransactions( + const OpId& retryable_requests_flushed_op_id, bool persist) REQUIRES(mutex_) { + auto threshold = VERIFY_RESULT(participant_context_.MaxPersistentOpId()); + threshold = OpId::MinValid(threshold, retryable_requests_flushed_op_id); + + if (!threshold.valid()) { + return min_replay_txn_start_ht_.load(std::memory_order_acquire); + } + + auto recently_applied_copy = + persist ? RecentlyAppliedTransactions() : RecentlyAppliedTransactions(recently_applied_); + auto& recently_applied = persist ? 
recently_applied_ : recently_applied_copy; + + auto cleaned = CleanRecentlyAppliedTransactions(recently_applied, threshold); + if (persist && cleaned > 0) { + metric_wal_replayable_applied_transactions_->DecrementBy(cleaned); + VLOG_WITH_PREFIX(1) << "Cleaned recently applied transactions with threshold: " << threshold + << ", cleaned " << cleaned + << ", remaining " << recently_applied_.size(); + UpdateMinReplayTxnStartTimeIfNeeded(); + } + + return GetMinReplayTxnStartTime(recently_applied); + } + + int64_t CleanRecentlyAppliedTransactions( + RecentlyAppliedTransactions& recently_applied, const OpId& threshold) { + if (!threshold.valid() || recently_applied.empty()) { + return 0; + } + + return EraseElementsUntil( + recently_applied.get(), + [&threshold](const AppliedTransactionState& state) { + return state.apply_op_id >= threshold; + }); + } + + HybridTime GetMinReplayTxnStartTime(RecentlyAppliedTransactions& recently_applied) { + auto min_running_ht = min_running_ht_.load(std::memory_order_acquire); + auto applied_min_ht = recently_applied.empty() + ? HybridTime::kMax + : (*recently_applied.get().begin()).start_ht; + + applied_min_ht.MakeAtMost(min_running_ht); + return applied_min_ht; + } + + void UpdateMinReplayTxnStartTimeIfNeeded() REQUIRES(mutex_) { + if (min_replay_txn_start_ht_callback_) { + auto ht = GetMinReplayTxnStartTime(recently_applied_); + if (min_replay_txn_start_ht_.exchange(ht, std::memory_order_acq_rel) != ht) { + min_replay_txn_start_ht_callback_(ht); + } + } + } + struct ImmediateCleanupQueueEntry { int64_t request_id; TransactionId transaction_id; @@ -2136,6 +2287,16 @@ class TransactionParticipant::Impl std::deque immediate_cleanup_queue_ GUARDED_BY(mutex_); std::deque graceful_cleanup_queue_ GUARDED_BY(mutex_); + // Information about recently applied transactions that are still needed at bootstrap time, used + // to calculate min_replay_txn_start_ht (lowest start_ht of any transaction which may be + // read during bootstrap log replay). 
+ RecentlyAppliedTransactions recently_applied_ GUARDED_BY(mutex_); + + // Retryable requests flushed_op_id, used to calculate bootstrap_start_op_id. A copy is held + // here instead of querying participant_context_ to avoid grabbing TabletPeer lock and causing + // a deadlock between flush listener and thread waiting for sync flush. + OpId retryable_requests_flushed_op_id_ GUARDED_BY(mutex_) = OpId::Invalid(); + // Remove queue maintains transactions that could be cleaned when safe time for follower reaches // appropriate time for an entry. // Since we add entries with increasing time, this queue is ordered by time. @@ -2171,6 +2332,7 @@ class TransactionParticipant::Impl scoped_refptr> metric_transactions_running_; scoped_refptr> metric_aborted_transactions_pending_cleanup_; + scoped_refptr> metric_wal_replayable_applied_transactions_; scoped_refptr metric_transaction_not_found_; scoped_refptr metric_conflict_resolution_latency_; scoped_refptr metric_conflict_resolution_num_keys_scanned_; @@ -2181,10 +2343,11 @@ class TransactionParticipant::Impl CountDownLatch shutdown_latch_{1}; std::atomic min_running_ht_{HybridTime::kInvalid}; + std::atomic min_replay_txn_start_ht_{HybridTime::kInvalid}; std::atomic next_check_min_running_{CoarseTimePoint()}; HybridTime waiting_for_min_running_ht_ = HybridTime::kMax; std::atomic shutdown_done_{false}; - std::function min_running_ht_callback_ GUARDED_BY(mutex_); + std::function min_replay_txn_start_ht_callback_ GUARDED_BY(mutex_); LRUCache cleanup_cache_{FLAGS_transactions_cleanup_cache_size}; @@ -2331,8 +2494,12 @@ TransactionParticipantContext* TransactionParticipant::context() const { return impl_->participant_context(); } -void TransactionParticipant::SetMinRunningHybridTimeLowerBound(HybridTime lower_bound) { - impl_->SetMinRunningHybridTimeLowerBound(lower_bound); +void TransactionParticipant::SetMinReplayTxnStartTimeLowerBound(HybridTime start_ht) { + impl_->SetMinReplayTxnStartTimeLowerBound(start_ht); +} + +HybridTime 
TransactionParticipant::MinReplayTxnStartTime() const { + return impl_->MinReplayTxnStartTime(); } HybridTime TransactionParticipant::MinRunningHybridTime() const { @@ -2444,9 +2611,22 @@ void TransactionParticipant::RecordConflictResolutionScanLatency(MonoDelta laten impl_->RecordConflictResolutionScanLatency(latency); } -void TransactionParticipant::SetMinRunningHybridTimeUpdateCallback( +void TransactionParticipant::SetMinReplayTxnStartTimeUpdateCallback( std::function callback) { - impl_->SetMinRunningHybridTimeUpdateCallback(std::move(callback)); + impl_->SetMinReplayTxnStartTimeUpdateCallback(std::move(callback)); +} + +Result TransactionParticipant::SimulateProcessRecentlyAppliedTransactions( + const OpId& retryable_requests_flushed_op_id) { + return impl_->SimulateProcessRecentlyAppliedTransactions(retryable_requests_flushed_op_id); +} + +void TransactionParticipant::SetRetryableRequestsFlushedOpId(const OpId& flushed_op_id) { + return impl_->SetRetryableRequestsFlushedOpId(flushed_op_id); +} + +Status TransactionParticipant::ProcessRecentlyAppliedTransactions() { + return impl_->ProcessRecentlyAppliedTransactions(); } } // namespace tablet diff --git a/src/yb/tablet/transaction_participant.h b/src/yb/tablet/transaction_participant.h index 5079ae79702c..b9e291153ef1 100644 --- a/src/yb/tablet/transaction_participant.h +++ b/src/yb/tablet/transaction_participant.h @@ -185,7 +185,9 @@ class TransactionParticipant : public TransactionStatusManager { TransactionParticipantContext* context() const; - void SetMinRunningHybridTimeLowerBound(HybridTime lower_bound); + void SetMinReplayTxnStartTimeLowerBound(HybridTime start_ht); + + HybridTime MinReplayTxnStartTime() const; HybridTime MinRunningHybridTime() const override; @@ -240,7 +242,7 @@ class TransactionParticipant : public TransactionStatusManager { size_t GetNumRunningTransactions() const; - void SetMinRunningHybridTimeUpdateCallback(std::function callback); + void 
SetMinReplayTxnStartTimeUpdateCallback(std::function callback); struct CountIntentsResult { size_t num_intents; @@ -253,6 +255,13 @@ class TransactionParticipant : public TransactionStatusManager { OneWayBitmap TEST_TransactionReplicatedBatches(const TransactionId& id) const; + Result SimulateProcessRecentlyAppliedTransactions( + const OpId& retryable_requests_flushed_op_id); + + void SetRetryableRequestsFlushedOpId(const OpId& flushed_op_id); + + Status ProcessRecentlyAppliedTransactions(); + private: Result RegisterRequest() override; void UnregisterRequest(int64_t request) override; diff --git a/src/yb/tablet/transaction_participant_context.h b/src/yb/tablet/transaction_participant_context.h index ca3d130bb06e..a8b0f5f6d0dd 100644 --- a/src/yb/tablet/transaction_participant_context.h +++ b/src/yb/tablet/transaction_participant_context.h @@ -48,6 +48,8 @@ class TransactionParticipantContext { // Returns hybrid time that lower than any future transaction apply record. virtual HybridTime SafeTimeForTransactionParticipant() = 0; + virtual Result MaxPersistentOpId() const = 0; + virtual Result WaitForSafeTime(HybridTime safe_time, CoarseTimePoint deadline) = 0; std::string LogPrefix() const; diff --git a/src/yb/util/algorithm_util.h b/src/yb/util/algorithm_util.h index e2f9e06bc086..4da60d3af8fa 100644 --- a/src/yb/util/algorithm_util.h +++ b/src/yb/util/algorithm_util.h @@ -98,4 +98,16 @@ auto StableSorted(const Col& collection, const Extractor& extractor) { return order; } +// Erases elements from container until predicate is satisfied. 
+template +size_t EraseElementsUntil(Container& container, const Predicate& predicate) { + size_t erased = 0; + auto itr = container.begin(); + while (itr != container.end() && !predicate(*itr)) { + itr = container.erase(itr); + ++erased; + } + return erased; +} + }; // namespace yb diff --git a/src/yb/yql/pgwrapper/pg_mini-test.cc b/src/yb/yql/pgwrapper/pg_mini-test.cc index 90667f97f0a0..a216b6503bd1 100644 --- a/src/yb/yql/pgwrapper/pg_mini-test.cc +++ b/src/yb/yql/pgwrapper/pg_mini-test.cc @@ -82,6 +82,10 @@ DECLARE_bool(enable_wait_queues); DECLARE_bool(pg_client_use_shared_memory); DECLARE_bool(ysql_yb_enable_replica_identity); DECLARE_bool(TEST_enable_pg_client_mock); +DECLARE_bool(delete_intents_sst_files); +DECLARE_bool(use_bootstrap_intent_ht_filter); +DECLARE_bool(TEST_no_schedule_remove_intents); +DECLARE_bool(TEST_disable_flush_on_shutdown); DECLARE_double(TEST_respond_write_failed_probability); DECLARE_double(TEST_transaction_ignore_applying_probability); @@ -2395,6 +2399,54 @@ TEST_F_EX(PgMiniTest, RegexPushdown, PgMiniTestSingleNode) { } } +TEST_F(PgMiniTestSingleNode, TestBootstrapOnAppliedTransactionWithIntents) { + ANNOTATE_UNPROTECTED_WRITE(FLAGS_delete_intents_sst_files) = false; + ANNOTATE_UNPROTECTED_WRITE(FLAGS_use_bootstrap_intent_ht_filter) = true; + ANNOTATE_UNPROTECTED_WRITE(FLAGS_TEST_no_schedule_remove_intents) = true; + ANNOTATE_UNPROTECTED_WRITE(FLAGS_TEST_disable_flush_on_shutdown) = true; + + auto conn1 = ASSERT_RESULT(Connect()); + auto conn2 = ASSERT_RESULT(Connect()); + + LOG(INFO) << "Creating table"; + ASSERT_OK(conn1.Execute("CREATE TABLE test(a int) SPLIT INTO 1 TABLETS")); + + const auto& peers = ListTabletPeers(cluster_.get(), ListPeersFilter::kLeaders); + tablet::TabletPeerPtr tablet_peer = nullptr; + for (auto peer : peers) { + if (peer->shared_tablet()->regular_db()) { + tablet_peer = peer; + break; + } + } + ASSERT_NE(tablet_peer, nullptr); + + LOG(INFO) << "T1 - BEGIN/INSERT"; + 
ASSERT_OK(conn1.StartTransaction(IsolationLevel::SNAPSHOT_ISOLATION)); + ASSERT_OK(conn1.Execute("INSERT INTO test(a) VALUES (0)")); + + LOG(INFO) << "Flush"; + ASSERT_OK(tablet_peer->shared_tablet()->Flush(tablet::FlushMode::kSync)); + + LOG(INFO) << "T2 - BEGIN/INSERT"; + ASSERT_OK(conn2.StartTransaction(IsolationLevel::SNAPSHOT_ISOLATION)); + ASSERT_OK(conn2.Execute("INSERT INTO test(a) VALUES (1)")); + + LOG(INFO) << "Flush"; + ASSERT_OK(tablet_peer->shared_tablet()->Flush(tablet::FlushMode::kSync)); + + LOG(INFO) << "T1 - Commit"; + ASSERT_OK(conn1.CommitTransaction()); + + ASSERT_OK(tablet_peer->FlushBootstrapState()); + + LOG(INFO) << "Restarting cluster"; + ASSERT_OK(RestartCluster()); + + conn1 = ASSERT_RESULT(Connect()); + auto res = ASSERT_RESULT(conn1.FetchRow("SELECT COUNT(*) FROM test")); + ASSERT_EQ(res, 1); +} Status MockAbortFailure( const yb::tserver::PgFinishTransactionRequestPB* req, From 0b37479701d85a65ec0be95aad1d2ddee9bcb2a3 Mon Sep 17 00:00:00 2001 From: Manav Kumar Date: Tue, 17 Sep 2024 07:59:51 +0000 Subject: [PATCH 48/75] [#23770] YSQL: Stabilize TestPgExplainAnalyze#testExplainAnalyzeOptions the test with ysql connection manager Summary: This diff makes `org.yb.pgsql.TestPgExplainAnalyze#testExplainAnalyzeOptions` test to run in round robin mode when run with ysql connection manager as the test expects caching of results on the backend on running the query, therefore with round robin mode we can deterministically populate the cache on each backend and run the test with ysql conn mgr. 
Jira: DB-12674 Test Plan: Jenkins: enable connection manager, test regex: .*ExplainAnalyze.* Ensure below test is working: `./yb_build.sh --enable-ysql-conn-mgr-test --java-test org.yb.pgsql.TestPgExplainAnalyze#testExplainAnalyzeOptions` Reviewers: skumar, rbarigidad Reviewed By: rbarigidad Differential Revision: https://phorge.dev.yugabyte.com/D37891 --- .../src/test/java/org/yb/pgsql/TestPgExplainAnalyze.java | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/java/yb-pgsql/src/test/java/org/yb/pgsql/TestPgExplainAnalyze.java b/java/yb-pgsql/src/test/java/org/yb/pgsql/TestPgExplainAnalyze.java index 660277768213..010c17311478 100644 --- a/java/yb-pgsql/src/test/java/org/yb/pgsql/TestPgExplainAnalyze.java +++ b/java/yb-pgsql/src/test/java/org/yb/pgsql/TestPgExplainAnalyze.java @@ -753,6 +753,12 @@ public void testDeleteReturning() throws Exception { @Test public void testExplainAnalyzeOptions() throws Exception { + if (isTestRunningWithConnectionManager()) { + // (DB-12674) Allow tests to run in round-robin allocation mode when + // using a pool of warmed up connections to allow for deterministic results. + setConnMgrWarmupModeAndRestartCluster(ConnectionManagerWarmupMode.ROUND_ROBIN); + setUp(); + } String query = String.format("SELECT * FROM %s", TABLE_NAME); try (Statement stmt = connection.createStatement()) { setHideNonDeterministicFields(stmt, true); From 84f3fabc2f482040a4f35e31127ae54289845e84 Mon Sep 17 00:00:00 2001 From: Aleksandr Malyshev Date: Mon, 16 Sep 2024 09:27:58 +0300 Subject: [PATCH 49/75] [PLAT-15322] Make sure build files have fresh last_modified date to make sure Play Framework assets caching works as expected Summary: Play Framework sets ETag header for caching based on static resource last modified date. Currently, last modified date for our assets is always set at Jan 1, 2010 - which makes YBA UI to break after YBA upgrades. 
The reason is that browser is trying to use old index.html page, which links to missing js files (js file names are generated somehow by UI build and are changing). This diff sets the environment variable, which sets file modification dates inside the build to a fresh timestamp. This will make sure browser downloads all the resources once after YBA upgrade and continue caching these after. Test Plan: Built YBA package with yb_release. Made sure web asset files have the right last_modified date. Reviewers: nsingh, sanketh, rmadhavan Reviewed By: rmadhavan Subscribers: yugaware Differential Revision: https://phorge.dev.yugabyte.com/D38063 --- managed/yb_release | 1 + 1 file changed, 1 insertion(+) diff --git a/managed/yb_release b/managed/yb_release index 44c0dada8fc6..995473380a2d 100755 --- a/managed/yb_release +++ b/managed/yb_release @@ -14,6 +14,7 @@ fi set -euo pipefail export DEVOPS_HOME="${BASH_SOURCE%/*}"/devops +export SOURCE_DATE_EPOCH="$(date +%s)" . "$DEVOPS_HOME/bin/"/common.sh From c7af74d0890037d7d4a5e3dec9b0c2695f67a41d Mon Sep 17 00:00:00 2001 From: Jethro Mak <88681329+Jethro-M@users.noreply.github.com> Date: Mon, 16 Sep 2024 20:10:28 -0400 Subject: [PATCH 50/75] [PLAT-15288] Use set_dbs endpoint when editing table selection for db scoped DR configs Summary: For DB scoped DR configs, we have a different endpoint for the edit table flow. Instead of set_tables, the client should send a request to the `set_dbs` endpoint instead. This diff also modifies the form field for the set_dbs request and edit xCluster config form by changing the `databases` to `dbs`. This change is made to stay consistent with the `dbs` field used during db scoped DR creation. Test Plan: Verify that user is able to add databases to and remove databases from a db scoped DR config. Verify that the YBA UI still uses set_tables endpoint when the config is not db scoped. 
Reviewers: rmadhavan, cwang, hzare, vbansal Reviewed By: cwang Subscribers: yugaware Differential Revision: https://phorge.dev.yugabyte.com/D38026 --- .../tasks/EditXClusterConfig.java | 6 ++-- .../yw/common/config/GlobalConfKeys.java | 8 ++--- .../yw/controllers/DrConfigController.java | 12 ++++--- .../controllers/XClusterConfigController.java | 2 +- .../yw/forms/DrConfigSetDatabasesForm.java | 2 +- .../yw/forms/XClusterConfigEditFormData.java | 2 +- managed/src/main/resources/reference.conf | 2 +- .../src/main/resources/swagger-strict.json | 4 +-- managed/src/main/resources/swagger.json | 4 +-- .../tasks/local/DRDbScopedLocalTest.java | 6 ++-- .../controllers/DrConfigControllerTest.java | 32 +++++++++---------- .../components/xcluster/ReplicationUtils.tsx | 2 +- .../disasterRecovery/DrConfigStateLabel.tsx | 2 +- .../createConfig/CreateConfigModal.tsx | 2 +- .../xcluster/disasterRecovery/dtos.ts | 2 +- .../editTables/EditTablesModal.tsx | 27 +++++++--------- .../xcluster/disasterRecovery/utils.ts | 2 +- .../xcluster/icons/ReplicationIcon.tsx | 2 +- .../bootstrapSummary/BootstrapSummary.tsx | 2 +- managed/ui/src/redesign/helpers/api.ts | 9 ++++++ managed/ui/src/redesign/helpers/constants.ts | 2 +- 21 files changed, 69 insertions(+), 63 deletions(-) diff --git a/managed/src/main/java/com/yugabyte/yw/commissioner/tasks/EditXClusterConfig.java b/managed/src/main/java/com/yugabyte/yw/commissioner/tasks/EditXClusterConfig.java index f60fe45edbe6..559dbc6b9097 100644 --- a/managed/src/main/java/com/yugabyte/yw/commissioner/tasks/EditXClusterConfig.java +++ b/managed/src/main/java/com/yugabyte/yw/commissioner/tasks/EditXClusterConfig.java @@ -100,7 +100,7 @@ public void run() { if (!CollectionUtils.isEmpty(taskParams().getTableIdsToRemove())) { createSubTaskToRemoveTables(xClusterConfig, sourceUniverse); } - } else if (editFormData.databases != null) { // Used for DB scoped replication only. + } else if (editFormData.dbs != null) { // Used for DB scoped replication only. 
if (!xClusterConfig.getType().equals(ConfigType.Db)) { throw new IllegalArgumentException( "The databases must be provided only for DB scoped replication"); @@ -148,9 +148,9 @@ public void run() { xClusterConfig.updateStatusForTables( tablesInPendingStatus, XClusterTableConfig.Status.Failed); } - if (editFormData.databases != null) { + if (editFormData.dbs != null) { // Set databases in updating status to failed. - Set dbIds = editFormData.databases; + Set dbIds = editFormData.dbs; Set namespacesInPendingStatus = xClusterConfig.getNamespaceIdsInStatus( dbIds, X_CLUSTER_NAMESPACE_CONFIG_PENDING_STATUS_LIST); diff --git a/managed/src/main/java/com/yugabyte/yw/common/config/GlobalConfKeys.java b/managed/src/main/java/com/yugabyte/yw/common/config/GlobalConfKeys.java index c49d269ac2a7..3f32c11ff495 100644 --- a/managed/src/main/java/com/yugabyte/yw/common/config/GlobalConfKeys.java +++ b/managed/src/main/java/com/yugabyte/yw/common/config/GlobalConfKeys.java @@ -785,12 +785,12 @@ public class GlobalConfKeys extends RuntimeConfigKeysModule { "It indicates whether creating disaster recovery configs are enabled", ConfDataType.BooleanType, ImmutableList.of(ConfKeyTags.PUBLIC)); - public static final ConfKeyInfo dbScopedXClusterEnabled = + public static final ConfKeyInfo dbScopedXClusterCreationEnabled = new ConfKeyInfo<>( - "yb.xcluster.db_scoped.enabled", + "yb.xcluster.db_scoped.creationEnabled", ScopeType.GLOBAL, - "Flag to enable db scoped xcluster replication", - "If flag is enabled, allows DR support with db scoped xcluster replication", + "Flag to enable db scoped xCluster replication creation", + "If flag is enabled, allows DR creation with db scoped xCluster replication", ConfDataType.BooleanType, ImmutableList.of(ConfKeyTags.INTERNAL)); public static final ConfKeyInfo xclusterEnableAutoFlagValidation = diff --git a/managed/src/main/java/com/yugabyte/yw/controllers/DrConfigController.java b/managed/src/main/java/com/yugabyte/yw/controllers/DrConfigController.java 
index 7bf7c5ec89c7..8455846c8fd1 100644 --- a/managed/src/main/java/com/yugabyte/yw/controllers/DrConfigController.java +++ b/managed/src/main/java/com/yugabyte/yw/controllers/DrConfigController.java @@ -183,12 +183,14 @@ public Result create(UUID customerUUID, Http.Request request) { } boolean isDbScoped = - confGetter.getGlobalConf(GlobalConfKeys.dbScopedXClusterEnabled) || createForm.dbScoped; - if (!confGetter.getGlobalConf(GlobalConfKeys.dbScopedXClusterEnabled) && createForm.dbScoped) { + confGetter.getGlobalConf(GlobalConfKeys.dbScopedXClusterCreationEnabled) + || createForm.dbScoped; + if (!confGetter.getGlobalConf(GlobalConfKeys.dbScopedXClusterCreationEnabled) + && createForm.dbScoped) { throw new PlatformServiceException( BAD_REQUEST, "Support for db scoped disaster recovery configs is disabled in YBA. You may enable it " - + "by setting yb.xcluster.db_scoped.enabled to true in the application.conf"); + + "by setting yb.xcluster.db_scoped.creationEnabled to true in the application.conf"); } if (isDbScoped) { @@ -1632,7 +1634,7 @@ public Result setDatabases(UUID customerUUID, UUID drConfigUuid, Http.Request re } DrConfigSetDatabasesForm setDatabasesForm = parseSetDatabasesForm(customerUUID, request); Set existingDatabaseIds = xClusterConfig.getDbIds(); - Set newDatabaseIds = setDatabasesForm.databases; + Set newDatabaseIds = setDatabasesForm.dbs; Set databaseIdsToAdd = Sets.difference(newDatabaseIds, existingDatabaseIds); Set databaseIdsToRemove = Sets.difference(existingDatabaseIds, newDatabaseIds); if (databaseIdsToAdd.isEmpty() && databaseIdsToRemove.isEmpty()) { @@ -1756,7 +1758,7 @@ private DrConfigSetDatabasesForm parseSetDatabasesForm(UUID customerUUID, Http.R DrConfigSetDatabasesForm formData = formFactory.getFormDataOrBadRequest( request.body().asJson(), DrConfigSetDatabasesForm.class); - formData.databases = XClusterConfigTaskBase.convertUuidStringsToIdStringSet(formData.databases); + formData.dbs = 
XClusterConfigTaskBase.convertUuidStringsToIdStringSet(formData.dbs); return formData; } diff --git a/managed/src/main/java/com/yugabyte/yw/controllers/XClusterConfigController.java b/managed/src/main/java/com/yugabyte/yw/controllers/XClusterConfigController.java index 9f195e445b3c..f611619dff3c 100644 --- a/managed/src/main/java/com/yugabyte/yw/controllers/XClusterConfigController.java +++ b/managed/src/main/java/com/yugabyte/yw/controllers/XClusterConfigController.java @@ -528,7 +528,7 @@ static XClusterConfigTaskParams getSetDatabasesTaskParams( Set databaseIdsToRemove) { XClusterConfigEditFormData editForm = new XClusterConfigEditFormData(); - editForm.databases = databaseIds; + editForm.dbs = databaseIds; return new XClusterConfigTaskParams( xClusterConfig, bootstrapParams, editForm, databaseIdsToAdd, databaseIdsToRemove); diff --git a/managed/src/main/java/com/yugabyte/yw/forms/DrConfigSetDatabasesForm.java b/managed/src/main/java/com/yugabyte/yw/forms/DrConfigSetDatabasesForm.java index 57f69c7e8d1b..fa7389ab9374 100644 --- a/managed/src/main/java/com/yugabyte/yw/forms/DrConfigSetDatabasesForm.java +++ b/managed/src/main/java/com/yugabyte/yw/forms/DrConfigSetDatabasesForm.java @@ -14,5 +14,5 @@ public class DrConfigSetDatabasesForm { example = "[\"000033df000030008000000000004006\", \"000033df00003000800000000000400b\"]") @YbaApi(visibility = YbaApi.YbaApiVisibility.PREVIEW, sinceYBAVersion = "2.23.0.0") @Required - public Set databases; + public Set dbs; } diff --git a/managed/src/main/java/com/yugabyte/yw/forms/XClusterConfigEditFormData.java b/managed/src/main/java/com/yugabyte/yw/forms/XClusterConfigEditFormData.java index 8334431e3d1e..5524d69fceee 100644 --- a/managed/src/main/java/com/yugabyte/yw/forms/XClusterConfigEditFormData.java +++ b/managed/src/main/java/com/yugabyte/yw/forms/XClusterConfigEditFormData.java @@ -36,7 +36,7 @@ public class XClusterConfigEditFormData { value = "WARNING: This is a preview API that could change. 
Source universe database IDs", example = "[\"000033df000030008000000000004006\", \"000033df00003000800000000000400b\"]") @YbaApi(visibility = YbaApi.YbaApiVisibility.PREVIEW, sinceYBAVersion = "2.23.0.0") - public Set databases; + public Set dbs; @ApiModelProperty( value = diff --git a/managed/src/main/resources/reference.conf b/managed/src/main/resources/reference.conf index ac8a11e1c822..e85740662326 100644 --- a/managed/src/main/resources/reference.conf +++ b/managed/src/main/resources/reference.conf @@ -251,7 +251,7 @@ yb { xcluster { db_scoped { - enabled = false + creationEnabled = false } bootstrap_producer_timeout = 2 minutes k8s_tls_support = true diff --git a/managed/src/main/resources/swagger-strict.json b/managed/src/main/resources/swagger-strict.json index 3c814caf61da..081694e9d8fd 100644 --- a/managed/src/main/resources/swagger-strict.json +++ b/managed/src/main/resources/swagger-strict.json @@ -5227,7 +5227,7 @@ "DrConfigSetDatabasesForm" : { "description" : "dr config set databases form", "properties" : { - "databases" : { + "dbs" : { "description" : "WARNING: This is a preview API that could change. Source universe database IDs", "example" : "[\"000033df000030008000000000004006\", \"000033df00003000800000000000400b\"]", "items" : { @@ -16083,7 +16083,7 @@ "$ref" : "#/definitions/BootstrapParams", "description" : "Parameters needed for the bootstrap flow including backup/restore" }, - "databases" : { + "dbs" : { "description" : "WARNING: This is a preview API that could change. 
Source universe database IDs", "example" : "[\"000033df000030008000000000004006\", \"000033df00003000800000000000400b\"]", "items" : { diff --git a/managed/src/main/resources/swagger.json b/managed/src/main/resources/swagger.json index d7ed48845fcd..6864c99cb78d 100644 --- a/managed/src/main/resources/swagger.json +++ b/managed/src/main/resources/swagger.json @@ -5262,7 +5262,7 @@ "DrConfigSetDatabasesForm" : { "description" : "dr config set databases form", "properties" : { - "databases" : { + "dbs" : { "description" : "WARNING: This is a preview API that could change. Source universe database IDs", "example" : "[\"000033df000030008000000000004006\", \"000033df00003000800000000000400b\"]", "items" : { @@ -16236,7 +16236,7 @@ "$ref" : "#/definitions/BootstrapParams", "description" : "Parameters needed for the bootstrap flow including backup/restore" }, - "databases" : { + "dbs" : { "description" : "WARNING: This is a preview API that could change. Source universe database IDs", "example" : "[\"000033df000030008000000000004006\", \"000033df00003000800000000000400b\"]", "items" : { diff --git a/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/local/DRDbScopedLocalTest.java b/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/local/DRDbScopedLocalTest.java index 05ce54c5a707..363e2b65ac69 100644 --- a/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/local/DRDbScopedLocalTest.java +++ b/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/local/DRDbScopedLocalTest.java @@ -67,7 +67,7 @@ public void setupDrDbScoped() { runtimeConfService.setKey( customer.getUuid(), ScopedRuntimeConfig.GLOBAL_SCOPE_UUID, - GlobalConfKeys.dbScopedXClusterEnabled.getKey(), + GlobalConfKeys.dbScopedXClusterCreationEnabled.getKey(), "true", true); @@ -283,10 +283,10 @@ public void testDrDbScopedUpdate() throws InterruptedException { List updateNamespaceNames = Arrays.asList("dbcolocated"); DrConfigSetDatabasesForm setDatabasesFormData = new 
DrConfigSetDatabasesForm(); - setDatabasesFormData.databases = new HashSet(); + setDatabasesFormData.dbs = new HashSet(); for (TableInfoForm.NamespaceInfoResp namespace : namespaceInfo) { if (updateNamespaceNames.contains(namespace.name)) { - setDatabasesFormData.databases.add(namespace.namespaceUUID.toString()); + setDatabasesFormData.dbs.add(namespace.namespaceUUID.toString()); } } diff --git a/managed/src/test/java/com/yugabyte/yw/controllers/DrConfigControllerTest.java b/managed/src/test/java/com/yugabyte/yw/controllers/DrConfigControllerTest.java index 13587ab512bb..b0904994b734 100644 --- a/managed/src/test/java/com/yugabyte/yw/controllers/DrConfigControllerTest.java +++ b/managed/src/test/java/com/yugabyte/yw/controllers/DrConfigControllerTest.java @@ -202,12 +202,12 @@ public void setUp() { } @Test - // Runtime config `yb.xcluster.db_scoped.enabled` = true and db scoped parameter is passed in - // as true for request body. + // Runtime config `yb.xcluster.db_scoped.creationEnabled` = true and db scoped parameter is passed + // in as true for request body. public void testCreateDbScopedSuccess() { settableRuntimeConfigFactory .globalRuntimeConf() - .setValue("yb.xcluster.db_scoped.enabled", "true"); + .setValue("yb.xcluster.db_scoped.creationEnabled", "true"); DrConfigCreateForm data = createDefaultCreateForm("dbScopedDR", true); UUID taskUUID = buildTaskInfo(null, TaskType.CreateDrConfig); when(mockCommissioner.submit(any(), any())).thenReturn(taskUUID); @@ -230,11 +230,11 @@ public void testCreateDbScopedSuccess() { } @Test - // Runtime config `yb.xcluster.db_scoped.enabled` = true with no parameter. + // Runtime config `yb.xcluster.db_scoped.creationEnabled` = true with no parameter. 
public void testSetDatabasesSuccess() { settableRuntimeConfigFactory .globalRuntimeConf() - .setValue("yb.xcluster.db_scoped.enabled", "true"); + .setValue("yb.xcluster.db_scoped.creationEnabled", "true"); DrConfigCreateForm data = createDefaultCreateForm("dbScopedDR", null); UUID taskUUID = buildTaskInfo(null, TaskType.CreateDrConfig); when(mockCommissioner.submit(any(), any())).thenReturn(taskUUID); @@ -254,7 +254,7 @@ public void testSetDatabasesSuccess() { assertNotNull(drConfig); UUID drConfigId = drConfig.getUuid(); DrConfigSetDatabasesForm setDatabasesData = new DrConfigSetDatabasesForm(); - setDatabasesData.databases = new HashSet<>(Set.of("db1", "db2")); + setDatabasesData.dbs = new HashSet<>(Set.of("db1", "db2")); XClusterConfig xClusterConfig = drConfig.getActiveXClusterConfig(); xClusterConfig.updateStatus(XClusterConfigStatusType.Running); drConfig.setState(State.Replicating); @@ -276,7 +276,7 @@ public void testSetDatabasesSuccess() { assertOk(result); // Try adding a database and deleting a database. - setDatabasesData.databases = new HashSet<>(Set.of("db2", "db3")); + setDatabasesData.dbs = new HashSet<>(Set.of("db2", "db3")); xClusterConfig = drConfig.getActiveXClusterConfig(); xClusterConfig.updateStatus(XClusterConfigStatusType.Running); @@ -297,11 +297,11 @@ public void testSetDatabasesSuccess() { } @Test - // Runtime config `yb.xcluster.db_scoped.enabled` = true with no parameter. + // Runtime config `yb.xcluster.db_scoped.creationEnabled` = true with no parameter. 
public void testSetDatabasesFailureNoChange() { settableRuntimeConfigFactory .globalRuntimeConf() - .setValue("yb.xcluster.db_scoped.enabled", "true"); + .setValue("yb.xcluster.db_scoped.creationEnabled", "true"); DrConfigCreateForm data = createDefaultCreateForm("dbScopedDR", null); UUID taskUUID = buildTaskInfo(null, TaskType.CreateDrConfig); when(mockCommissioner.submit(any(), any())).thenReturn(taskUUID); @@ -320,7 +320,7 @@ public void testSetDatabasesFailureNoChange() { assertNotNull(drConfig); UUID drConfigId = drConfig.getUuid(); DrConfigSetDatabasesForm setDatabasesData = new DrConfigSetDatabasesForm(); - setDatabasesData.databases = new HashSet<>(Set.of(namespaceId)); + setDatabasesData.dbs = new HashSet<>(Set.of(namespaceId)); XClusterConfig xClusterConfig = drConfig.getActiveXClusterConfig(); xClusterConfig.updateStatus(XClusterConfigStatusType.Running); xClusterConfig.updateStatusForNamespace(namespaceId, XClusterNamespaceConfig.Status.Running); @@ -347,11 +347,11 @@ public void testSetDatabasesFailureNoChange() { } @Test - // Runtime config `yb.xcluster.db_scoped.enabled` = true with no parameter. + // Runtime config `yb.xcluster.db_scoped.creationEnabled` = true with no parameter. public void testSetDatabasesFailureNoDbs() { settableRuntimeConfigFactory .globalRuntimeConf() - .setValue("yb.xcluster.db_scoped.enabled", "true"); + .setValue("yb.xcluster.db_scoped.creationEnabled", "true"); DrConfigCreateForm data = createDefaultCreateForm("dbScopedDR", null); UUID taskUUID = buildTaskInfo(null, TaskType.CreateDrConfig); when(mockCommissioner.submit(any(), any())).thenReturn(taskUUID); @@ -377,7 +377,7 @@ public void testSetDatabasesFailureNoDbs() { drConfig.update(); // Try giving an empty list. 
- setDatabasesData.databases = new HashSet<>(); + setDatabasesData.dbs = new HashSet<>(); xClusterConfig = drConfig.getActiveXClusterConfig(); xClusterConfig.updateStatus(XClusterConfigStatusType.Running); Exception exception = @@ -397,12 +397,12 @@ public void testSetDatabasesFailureNoDbs() { } @Test - // Runtime config `yb.xcluster.db_scoped.enabled` is disabled but db scoped parameter is passed in - // as true for request body. + // Runtime config `yb.xcluster.db_scoped.creationEnabled` is disabled but db scoped parameter is + // passed in as true for request body. public void testCreateDbScopedDisabledFailure() { settableRuntimeConfigFactory .globalRuntimeConf() - .setValue("yb.xcluster.db_scoped.enabled", "false"); + .setValue("yb.xcluster.db_scoped.creationEnabled", "false"); DrConfigCreateForm data = createDefaultCreateForm("dbScopedDR", true); buildTaskInfo(null, TaskType.CreateDrConfig); diff --git a/managed/ui/src/components/xcluster/ReplicationUtils.tsx b/managed/ui/src/components/xcluster/ReplicationUtils.tsx index 7b4b616f8b85..bd3f455f993a 100644 --- a/managed/ui/src/components/xcluster/ReplicationUtils.tsx +++ b/managed/ui/src/components/xcluster/ReplicationUtils.tsx @@ -466,7 +466,7 @@ export const getEnabledConfigActions = ( isXClusterConfigAllBidirectional: boolean, drConfigState?: DrConfigState ): XClusterConfigAction[] => { - if (drConfigState === DrConfigState.ERROR) { + if (drConfigState === DrConfigState.FAILED) { // When DR config is in error state, we only allow the DR config delete operation. 
return []; } diff --git a/managed/ui/src/components/xcluster/disasterRecovery/DrConfigStateLabel.tsx b/managed/ui/src/components/xcluster/disasterRecovery/DrConfigStateLabel.tsx index 22c5796c4db3..e4ae4d30ab7e 100644 --- a/managed/ui/src/components/xcluster/disasterRecovery/DrConfigStateLabel.tsx +++ b/managed/ui/src/components/xcluster/disasterRecovery/DrConfigStateLabel.tsx @@ -83,7 +83,7 @@ export const DrConfigStateLabel = ({ drConfig, variant = 'body2' }: DrConfigStat ); - case DrConfigState.ERROR: + case DrConfigState.FAILED: return ( config.key === RuntimeConfigKey.XCLUSTER_DB_SCOPED_FEATURE_FLAG + (config: any) => config.key === RuntimeConfigKey.XCLUSTER_DB_SCOPED_CREATION_FEATURE_FLAG )?.value ?? false; const onSubmit: SubmitHandler = async (formValues) => { diff --git a/managed/ui/src/components/xcluster/disasterRecovery/dtos.ts b/managed/ui/src/components/xcluster/disasterRecovery/dtos.ts index 3c3e8e38365d..9b8017ca641f 100644 --- a/managed/ui/src/components/xcluster/disasterRecovery/dtos.ts +++ b/managed/ui/src/components/xcluster/disasterRecovery/dtos.ts @@ -58,7 +58,7 @@ export const DrConfigState = { SWITCHOVER_IN_PROGRESS: 'Switchover in Progress', FAILOVER_IN_PROGRESS: 'Failover in Progress', HALTED: 'Halted', - ERROR: 'Error' + FAILED: 'Failed' } as const; export type DrConfigState = typeof DrConfigState[keyof typeof DrConfigState]; diff --git a/managed/ui/src/components/xcluster/disasterRecovery/editTables/EditTablesModal.tsx b/managed/ui/src/components/xcluster/disasterRecovery/editTables/EditTablesModal.tsx index a1b36025779a..5d07fc0666c6 100644 --- a/managed/ui/src/components/xcluster/disasterRecovery/editTables/EditTablesModal.tsx +++ b/managed/ui/src/components/xcluster/disasterRecovery/editTables/EditTablesModal.tsx @@ -16,14 +16,14 @@ import { YBButton, YBModal, YBModalProps } from '../../../../redesign/components import { api, drConfigQueryKey, - runtimeConfigQueryKey, universeQueryKey, xClusterQueryKey } from 
'../../../../redesign/helpers/api'; import { assertUnreachableCase, handleServerError } from '../../../../utils/errorHandlingUtils'; import { YBErrorIndicator, YBLoading } from '../../../common/indicators'; -import { XClusterConfigAction, XClusterTableStatus } from '../../constants'; +import { XClusterConfigAction, XClusterConfigType, XClusterTableStatus } from '../../constants'; import { + formatUuidForXCluster, getCategorizedNeedBootstrapPerTableResponse, getInConfigTableUuidsToTableDetailsMap, getXClusterConfigTableType, @@ -114,19 +114,19 @@ export const EditTablesModal = (props: EditTablesModalProps) => { universeQueryKey.namespaces(xClusterConfigQuery.data?.sourceUniverseUUID), () => api.fetchUniverseNamespaces(xClusterConfigQuery.data?.sourceUniverseUUID) ); - const customerUuid = localStorage.getItem('customerId') ?? ''; - const runtimeConfigQuery = useQuery(runtimeConfigQueryKey.customerScope(customerUuid), () => - api.fetchRuntimeConfigs(customerUuid, true) - ); const editTableMutation = useMutation( (formValues: EditTablesFormValues) => { const bootstrapRequiredTableUuids = categorizedNeedBootstrapPerTableResponse?.bootstrapTableUuids ?? []; return props.isDrInterface - ? api.updateTablesInDr(props.drConfigUuid, { - tables: formValues.tableUuids - }) + ? xClusterConfigQuery.data?.type === XClusterConfigType.DB_SCOPED + ? 
api.updateDbsInDr(props.drConfigUuid, { + dbs: formValues.namespaceUuids.map(formatUuidForXCluster) + }) + : api.updateTablesInDr(props.drConfigUuid, { + tables: formValues.tableUuids + }) : editXClusterConfigTables(xClusterConfigUuid, { tables: formValues.tableUuids, autoIncludeIndexTables: shouldAutoIncludeIndexTables(xClusterConfigQuery.data), @@ -192,9 +192,7 @@ export const EditTablesModal = (props: EditTablesModalProps) => { sourceUniverseQuery.isLoading || sourceUniverseQuery.isIdle || sourceUniverseNamespacesQuery.isLoading || - sourceUniverseNamespacesQuery.isIdle || - runtimeConfigQuery.isLoading || - runtimeConfigQuery.isIdle + sourceUniverseNamespacesQuery.isIdle ) { return ( @@ -225,8 +223,7 @@ export const EditTablesModal = (props: EditTablesModalProps) => { !targetUniverseUuid || sourceUniverseQuery.isError || sourceUniverseNamespacesQuery.isError || - !xClusterConfigTableType || - runtimeConfigQuery.isError + !xClusterConfigTableType ) { const errorMessage = !xClusterConfig.sourceUniverseUUID ? t('error.undefinedSourceUniverseUuid') @@ -234,8 +231,6 @@ export const EditTablesModal = (props: EditTablesModalProps) => { ? t('error.undefinedTargetUniverseUuid') : !xClusterConfigTableType ? t('error.undefinedXClusterTableType', { keyPrefix: TRANSLATION_KEY_PREFIX_XCLUSTER }) - : runtimeConfigQuery.isError - ? 
t('failedToFetchCustomerRuntimeConfig', { keyPrefix: 'queryError' }) : t('error.fetchSourceUniverseDetailsFailure'); return ( diff --git a/managed/ui/src/components/xcluster/disasterRecovery/utils.ts b/managed/ui/src/components/xcluster/disasterRecovery/utils.ts index 95ff148b0e21..c8348af6afdd 100644 --- a/managed/ui/src/components/xcluster/disasterRecovery/utils.ts +++ b/managed/ui/src/components/xcluster/disasterRecovery/utils.ts @@ -29,7 +29,7 @@ export const getEnabledDrConfigActions = ( case DrConfigState.INITIALIZING: case DrConfigState.SWITCHOVER_IN_PROGRESS: case DrConfigState.FAILOVER_IN_PROGRESS: - case DrConfigState.ERROR: + case DrConfigState.FAILED: return [DrConfigAction.DELETE]; case DrConfigState.REPLICATING: return [ diff --git a/managed/ui/src/components/xcluster/icons/ReplicationIcon.tsx b/managed/ui/src/components/xcluster/icons/ReplicationIcon.tsx index 92ad98a337fc..6c043afa4311 100644 --- a/managed/ui/src/components/xcluster/icons/ReplicationIcon.tsx +++ b/managed/ui/src/components/xcluster/icons/ReplicationIcon.tsx @@ -44,7 +44,7 @@ export const ReplicationIcon = ({ drConfig }: ReplicationIconProps) => {
); case DrConfigState.REPLICATING: - case DrConfigState.ERROR: + case DrConfigState.FAILED: return ; case DrConfigState.HALTED: return ( diff --git a/managed/ui/src/components/xcluster/sharedComponents/bootstrapSummary/BootstrapSummary.tsx b/managed/ui/src/components/xcluster/sharedComponents/bootstrapSummary/BootstrapSummary.tsx index f3328b2aa5d9..330b6578d2b6 100644 --- a/managed/ui/src/components/xcluster/sharedComponents/bootstrapSummary/BootstrapSummary.tsx +++ b/managed/ui/src/components/xcluster/sharedComponents/bootstrapSummary/BootstrapSummary.tsx @@ -122,7 +122,7 @@ export const BootstrapSummary = (props: ConfigureBootstrapStepProps) => { targetTableMissing } = categorizedNeedBootstrapPerTableResponse; const skipBootstrap = watch('skipBootstrap'); - const runtimeConfigEntries = runtimeConfigQuery.data.configEntries ?? []; + const runtimeConfigEntries = runtimeConfigQuery.data?.configEntries ?? []; const isSkipBootstrappingEnabled = runtimeConfigEntries.some( (config: any) => diff --git a/managed/ui/src/redesign/helpers/api.ts b/managed/ui/src/redesign/helpers/api.ts index 037c3a88e570..540dd37d2a18 100644 --- a/managed/ui/src/redesign/helpers/api.ts +++ b/managed/ui/src/redesign/helpers/api.ts @@ -256,6 +256,10 @@ export interface UpdateTablesInDrRequest { autoIncludeIndexTables?: boolean; } +export interface UpdateDbsInDrRequest { + dbs: string[]; +} + export interface CreateHaConfigRequest { cluster_key: string; @@ -522,6 +526,11 @@ class ApiService { .then((response) => response.data); }; + updateDbsInDr = (drConfigUuid: string, updateDbsInDrRequest: UpdateDbsInDrRequest) => { + const requestUrl = `${ROOT_URL}/customers/${this.getCustomerId()}/dr_configs/${drConfigUuid}/set_dbs`; + return axios.put(requestUrl, updateDbsInDrRequest).then((response) => response.data); + }; + syncDrConfig = (drConfigUuid: string) => { const requestUrl = `${ROOT_URL}/customers/${this.getCustomerId()}/dr_configs/${drConfigUuid}/sync`; return 
axios.post(requestUrl).then((response) => response.data); diff --git a/managed/ui/src/redesign/helpers/constants.ts b/managed/ui/src/redesign/helpers/constants.ts index be072b4626de..e82314c0657f 100644 --- a/managed/ui/src/redesign/helpers/constants.ts +++ b/managed/ui/src/redesign/helpers/constants.ts @@ -44,7 +44,7 @@ export const RuntimeConfigKey = { XCLUSTER_TRANSACTIONAL_ATOMICITY_FEATURE_FLAG: 'yb.xcluster.transactional.enabled', ENABLE_XCLUSTER_SKIP_BOOTSTRAPPING: 'yb.ui.xcluster.enable_skip_bootstrapping', DISASTER_RECOVERY_FEATURE_FLAG: 'yb.xcluster.dr.enabled', - XCLUSTER_DB_SCOPED_FEATURE_FLAG: 'yb.xcluster.db_scoped.enabled', + XCLUSTER_DB_SCOPED_CREATION_FEATURE_FLAG: 'yb.xcluster.db_scoped.creationEnabled', PERFORMANCE_ADVISOR_UI_FEATURE_FLAG: 'yb.ui.feature_flags.perf_advisor', GRANULAR_METRICS_FEATURE_FLAG: 'yb.ui.feature_flags.granular_metrics', IS_UNIVERSE_AUTH_ENFORCED: 'yb.universe.auth.is_enforced', From 8faeca6605f2471b441ee59e63adc2a2bae50e98 Mon Sep 17 00:00:00 2001 From: Jethro Mak <88681329+Jethro-M@users.noreply.github.com> Date: Fri, 13 Sep 2024 00:53:54 -0400 Subject: [PATCH 51/75] [PLAT-15300] Update task progress poller logic Summary: **Context** It is possible for the task progress to be at 100%, while not being in failure state or success state. As a result, the UI will sometimes prematurely send a task completed toast notification even before the task is completed. **Change** The onTaskCompletion function will now only run if the task progress is at 100% and the status is `Success`. Test Plan: - Run several different tasks that would lock and freeze a universe for update. This will create the subtask `createFreezeUniverseTask` which will report a percent completed field of 100% even before the actual intended task is completed. - Verify that the UI toast notification is only fired when the main task is completed. 
Reviewers: rmadhavan, hzare, nsingh Reviewed By: nsingh Subscribers: yugaware Differential Revision: https://phorge.dev.yugabyte.com/D38030 --- managed/ui/src/actions/xClusterReplication.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/managed/ui/src/actions/xClusterReplication.ts b/managed/ui/src/actions/xClusterReplication.ts index e7ff85c5169a..780a1ca62ba6 100644 --- a/managed/ui/src/actions/xClusterReplication.ts +++ b/managed/ui/src/actions/xClusterReplication.ts @@ -335,7 +335,7 @@ export function fetchTaskUntilItCompletes( } if (status === 'Failed' || status === 'Failure') { onTaskCompletion(true, resp); - } else if (percent === 100) { + } else if (percent === 100 && status === 'Success') { onTaskCompletion(false, resp.data); } else { setTimeout(retryTask, interval); From e4f59434de0b79a4684b1b2d47f9265f124f0b37 Mon Sep 17 00:00:00 2001 From: Dwight Hodge <79169168+ddhodge@users.noreply.github.com> Date: Tue, 17 Sep 2024 10:46:58 -0400 Subject: [PATCH 52/75] [doc][ybm] VictoriaMetrics (#23819) * VictoriaMetrics * Update docs/content/preview/yugabyte-cloud/cloud-monitor/managed-integrations.md Co-authored-by: bansal01yash <80818350+bansal01yash@users.noreply.github.com> * review comment * change log * review comment * review comment * date * review comment * release notes --------- Co-authored-by: bansal01yash <80818350+bansal01yash@users.noreply.github.com> --- .../cloud-monitor/managed-integrations.md | 45 ++++++++++ .../cloud-monitor/metrics-export.md | 9 +- .../cloud-monitor/monitor-activity.md | 2 - .../preview/yugabyte-cloud/release-notes.md | 84 ++++++++++--------- 4 files changed, 98 insertions(+), 42 deletions(-) diff --git a/docs/content/preview/yugabyte-cloud/cloud-monitor/managed-integrations.md b/docs/content/preview/yugabyte-cloud/cloud-monitor/managed-integrations.md index 49a39fff651f..a327e5644296 100644 --- a/docs/content/preview/yugabyte-cloud/cloud-monitor/managed-integrations.md +++ 
b/docs/content/preview/yugabyte-cloud/cloud-monitor/managed-integrations.md @@ -23,6 +23,7 @@ Currently, you can export data to the following tools: - [Grafana Cloud](https://grafana.com/docs/grafana-cloud/) - [Sumo Logic](https://www.sumologic.com) - [Prometheus](https://prometheus.io/docs/introduction/overview/) {{}} +- [VictoriaMetrics](https://docs.victoriametrics.com/) {{}} Exporting cluster metrics and logs counts against your data transfer allowance. This may incur additional costs for network transfer, especially for cross-region and internet-based transfers, if usage exceeds your cluster allowance. Refer to [Data transfer costs](../../cloud-admin/cloud-billing-costs/#data-transfer-costs). @@ -127,6 +128,50 @@ To create an export configuration, do the following: 1. On the **Integrations** page, click **Configure** for the Prometheus provider or, if a configuration is already available, **Add Configuration**. 1. Enter a name for the configuration. 1. Enter the endpoint URL of the Prometheus instance. + + The URL must be in the form + + ```sh + http:///api/v1/otlp + ``` + +1. Click **Create Configuration**. + + {{% /tab %}} + + {{% tab header="VictoriaMetrics" lang="victoria" %}} + +VictoriaMetrics integration is {{}} and only available for clusters deployed on AWS. + +The VictoriaMetrics integration requires the following: + +- VictoriaMetrics instance + - deployed in a VPC on AWS + - publicly-accessible endpoint URL that resolves to the private IP of the VictoriaMetrics instance; the DNS for the endpoint must be in a public hosted zone in AWS. The URL must be in the form as described in [How to use OpenTelemetry metrics with VictoriaMetrics](https://docs.victoriametrics.com/guides/getting-started-with-opentelemetry/). 
+ - VPC hosting the VictoriaMetrics instance has the following Inbound Security Group rules: + - Allow HTTP inbound traffic on port 80 for VictoriaMetrics endpoint URL (HTTP) + - Allow HTTPS inbound traffic on port 443 for VictoriaMetrics endpoint URL (HTTPS) + + See [Control traffic to your AWS resources using security groups](https://docs.aws.amazon.com/vpc/latest/userguide/vpc-security-groups.html) in the AWS documentation. + +- YugabyteDB Aeon cluster from which you want to export metrics + - the cluster is [deployed in VPCs](../../cloud-basics/cloud-vpcs/cloud-add-vpc/) on AWS + - each region VPC is peered with the VPC hosting VictoriaMetrics. See [Peer VPCs](../../cloud-basics/cloud-vpcs/cloud-add-vpc-aws/). + + As each region of a cluster deployed in AWS has its own VPC, make sure that all the VPCs are peered and allow inbound access from VictoriaMetrics; this also applies to regions you add or change after deployment, and to read replicas. For information on VPC networking in YugabyteDB Aeon, see [VPC network overview](../../cloud-basics/cloud-vpcs/cloud-vpc-intro/). + +To create an export configuration, do the following: + +1. On the **Integrations** page, click **Configure** for the VictoriaMetrics provider or, if a configuration is already available, **Add Configuration**. +1. Enter a name for the configuration. +1. Enter the endpoint URL of the VictoriaMetrics instance. + + The URL must be in the form + + ```sh + http:///opentelemetry + ``` + 1. Click **Create Configuration**. {{% /tab %}} diff --git a/docs/content/preview/yugabyte-cloud/cloud-monitor/metrics-export.md b/docs/content/preview/yugabyte-cloud/cloud-monitor/metrics-export.md index 840c5176e94a..dda86752826e 100644 --- a/docs/content/preview/yugabyte-cloud/cloud-monitor/metrics-export.md +++ b/docs/content/preview/yugabyte-cloud/cloud-monitor/metrics-export.md @@ -15,15 +15,20 @@ type: docs You can export [cluster metrics](../overview/) to third-party tools for analysis and customization. 1. 
Create an export configuration. An export configuration defines the settings and login information for the tool that you want to export your metrics to. + + For information on the available integrations and instructions on creating an export configuration, refer to [Integrations](../managed-integrations/). + 1. Assign a configuration to the cluster. Once created, you can assign an export configuration to one or more clusters. While the connection is active, metrics are automatically streamed to the tool. -Currently, YugabyteDB Aeon supports export to [Datadog](https://docs.datadoghq.com/), [Grafana Cloud](https://grafana.com/docs/grafana-cloud/), and [Sumo Logic](https://www.sumologic.com). Metrics export is not available for Sandbox clusters. + You assign export configurations on the cluster **Settings > Export Metrics** page. + +Metrics export is not available for Sandbox clusters. Exporting metrics may incur costs for network transfer, especially for cross-region and internet-based transfers. Refer to [Data transfer costs](../../cloud-admin/cloud-billing-costs/#data-transfer-costs). ## Prerequisites -Create an integration configuration. A configuration defines the sign in credentials and settings for the tool that you want to export your logs to. Refer to [Integrations](../managed-integrations). +Create an export configuration. A configuration defines the sign in credentials and settings for the tool that you want to export your logs to. Refer to [Integrations](../managed-integrations). 
## Export cluster metrics diff --git a/docs/content/preview/yugabyte-cloud/cloud-monitor/monitor-activity.md b/docs/content/preview/yugabyte-cloud/cloud-monitor/monitor-activity.md index 065d3fa932a8..bbaf6f330369 100644 --- a/docs/content/preview/yugabyte-cloud/cloud-monitor/monitor-activity.md +++ b/docs/content/preview/yugabyte-cloud/cloud-monitor/monitor-activity.md @@ -4,8 +4,6 @@ headerTitle: Monitor cluster activity linkTitle: Cluster activity description: View the activity on your cluster. headcontent: View the activity on your cluster -aliases: - - /preview/yugabyte-cloud/cloud-monitor/managed-integrations menu: preview_yugabyte-cloud: identifier: monitor-activity diff --git a/docs/content/preview/yugabyte-cloud/release-notes.md b/docs/content/preview/yugabyte-cloud/release-notes.md index fc52f45a5ce1..f8bdb4b80ca0 100644 --- a/docs/content/preview/yugabyte-cloud/release-notes.md +++ b/docs/content/preview/yugabyte-cloud/release-notes.md @@ -19,6 +19,14 @@ On this page: ## Change log +### September 17, 2024 + +**New features** + +- Support for [exporting cluster metrics](../cloud-monitor/managed-integrations/) from clusters deployed in AWS to [Prometheus](https://prometheus.io/docs/introduction/overview/) and [VictoriaMetrics](https://docs.victoriametrics.com/). These integrations are available as a [tech preview](/preview/releases/versioning/#feature-maturity). To try them out, send a request to {{% support-cloud %}}. +- Ability to set [alerts](../cloud-monitor/cloud-alerts/) for when a cluster reaches its limit for the allowed number of tablet peers. +- New Azure regions: Netherlands, Stockholm, and Doha. + ### August 15, 2024 **New features** @@ -44,7 +52,7 @@ On this page: ### June 24, 2024 -**New Features** +**New features** - YugabyteDB Managed is now YugabyteDB Aeon! [Learn more](https://www.yugabyte.com/blog/introducing-yugabytedb-aeon/). - Support for new pricing plans. Existing customers continue to be billed using classic pricing. 
[Learn more](https://www.yugabyte.com/pricing/). @@ -52,7 +60,7 @@ On this page: ### June 13, 2024 -**New Features** +**New features** - Support for exporting [PostgreSQL logs](https://www.postgresql.org/docs/11/runtime-config-logging.html) to third-party tools (such as Datadog) for security monitoring, to build operations and health dashboards, troubleshooting, and more. - New regions. Jakarta and Hyderabad on AWS, and Singapore on Azure. @@ -63,7 +71,7 @@ On this page: ### April 22, 2024 -**New Features** +**New features** - Support for incremental backups for faster backups with greater frequency. Incremental backups only include the data that has changed since the last backup, be it a full or incremental backup. - Ability to size each region in partition by region clusters to its load. Number of nodes, number of vCPUs, disk size, and IOPS can now be set independently for each region. Add extra horsepower in high-traffic regions, and provision lower-traffic regions with fewer nodes. @@ -83,7 +91,7 @@ On this page: ### February 8, 2024 -**New Features** +**New features** - Support for PingOne [federated authentication](../managed-security/managed-authentication/), which allows single sign-on access for your account users using their PingOne identity. @@ -91,7 +99,7 @@ On this page: ### January 31, 2024 -**New Features** +**New features** - Support for Okta [federated authentication](../managed-security/managed-authentication/), which allows single sign-on access for your account users using their Okta identities. @@ -103,13 +111,13 @@ On this page: ### December 27, 2023 -**New Features** +**New features** - Support for enhanced [fault tolerance](../cloud-basics/create-clusters-overview/#fault-tolerance). Clusters are fault tolerant, meaning they continue to serve reads and writes even with the loss of a node, availability zone, or region. You can now configure clusters with node- or region-level fault tolerance to be resilient to up to three domain outages. 
For example, you can create a cluster with region-level fault tolerance that can continue to serve reads and writes without interruption even if two of its regions become unavailable. ### December 4, 2023 -**New Features** +**New features** - Support for [federated authentication](../managed-security/managed-authentication/), which allows you to use an identity provider to manage access to your account. Initial support includes the Microsoft Entra ID (Azure AD) platform, providing single sign-on access for your account users using their Microsoft identities. - Added ability to [audit account login activity](../cloud-secure-clusters/cloud-activity/). Navigate to **Security > Activity > Access History** to review the access history, including the client IP address, activity type, number of attempts, timestamp, and result. @@ -122,7 +130,7 @@ On this page: ### November 16, 2023 -**New Features** +**New features** - [Product Labs](../../yugabyte-cloud/managed-labs/) provides an interactive, in-product learning experience. Learn about YugabyteDB features using real-world applications running on live YugabyteDB clusters. The first lab, Create Global Applications, demonstrates how to manage latencies using three different deployment strategies. - Support for [exporting cluster metrics](../cloud-monitor/managed-integrations/) to Sumo Logic. @@ -133,7 +141,7 @@ On this page: ### November 3, 2023 -**New Features** +**New features** - Ability to track usage per cluster over time. Navigate to **Usage & Billing > Usage** to view cumulative and daily usage of cluster compute, disk storage, cloud backup storage, and data transfer. - For Azure, all regions that are supported for single-region clusters are now available for multi-region clusters (including Virginia useast2, Tokyo, Seoul, Johannesburg, Texas, and Dubai). 
@@ -145,7 +153,7 @@ On this page: ### October 5, 2023 -**New Features** +**New features** - Support for creating [private service endpoints](../cloud-basics/cloud-vpcs/cloud-add-endpoint/) (PSEs) in the YugabyteDB Aeon UI (this feature was previously only available using the YugabyteDB Aeon CLI). Add PSEs to clusters to connect to your application VPC over a secure private link. Supports AWS PrivateLink and Azure Private Link. - Support for [exporting cluster metrics](../cloud-monitor/managed-integrations/) to Grafana Cloud. @@ -159,13 +167,13 @@ On this page: ### September 12, 2023 -**New Features** +**New features** - Support for multi-region clusters and read replicas in Azure. You can now create dedicated clusters with multi-region deployment in Azure, as well as read replicas. ### September 6, 2023 -**New Features** +**New features** - Support for enabling and disabling YugabyteDB [encryption at rest](../cloud-secure-clusters/managed-ear/) using a customer managed key and rotating keys on encrypted clusters. Clusters must be using YugabyteDB v2.16.7 or later. - Support for [exporting cluster metrics](../cloud-monitor/managed-integrations/) to Datadog. @@ -199,7 +207,7 @@ On this page: ### June 30, 2023 -**New Features** +**New features** - Support for deploying clusters on Microsoft Azure, including: - Global availability - deploy in 20 Azure regions worldwide. @@ -213,7 +221,7 @@ On this page: ### June 26, 2023 -**New Features** +**New features** - Support for using a customer managed key (CMK) in Google Cloud Key Management Service (KMS) to encrypt a dedicated cluster (preview release). When YugabyteDB encryption at rest is enabled, your can now encrypt your cluster using your own CMK residing in Google Cloud KMS or AWS KMS. 
@@ -225,7 +233,7 @@ On this page: ### June 7, 2023 -**New Features** +**New features** - Ability to create custom roles using Role-Based Access Control (RBAC) to precisely manage user access and permissions to match your organization's specific needs. This includes a new built-in Viewer role which offers a secure and restricted view of cluster information without the risk of unintended modifications. @@ -237,7 +245,7 @@ On this page: ### April 28, 2023 -**New Features** +**New features** - Support for using a customer managed key (CMK) to encrypt a dedicated cluster (preview release). When YugabyteDB encryption at rest is enabled, your cluster (including backups) is encrypted using your own CMK residing in AWS Key Management Service (KMS). @@ -249,7 +257,7 @@ On this page: ### March 27, 2023 -**New Features** +**New features** - [YugabyteDB Aeon CLI](../../yugabyte-cloud/managed-automation/managed-cli/). Use the YugabyteDB Aeon command line interface (ybm CLI) to deploy and manage your YugabyteDB Aeon database clusters from your terminal or IDE. - Support for AWS PrivateLink (preview release). Connect YugabyteDB Aeon clusters on AWS with other AWS resources via private endpoints. Currently only configurable via ybm CLI. @@ -282,7 +290,7 @@ On this page: ### February 8, 2023 -**New Features** +**New features** - Users can now request a [time-limited free trial](../managed-freetrial/) to explore all the YugabyteDB Aeon features. @@ -292,7 +300,7 @@ On this page: ### January 27, 2023 -**New Features** +**New features** - [YugabyteDB Aeon Terraform Provider](https://registry.terraform.io/providers/yugabyte/ybm/latest) generally available. Use the provider to deploy and manage your database clusters in YugabyteDB Aeon. @@ -314,7 +322,7 @@ On this page: ### December 21, 2022 -**New Features** +**New features** - Ability to add IP addresses to the cluster IP allow list during cluster creation. 
The **Create Cluster** wizard includes the new **Networking** page to configure connectivity for your cluster. Automatically detect and add your current IP address or the addresses of any peered VPC to the cluster. - Ability to connect to clusters deployed in VPCs from public IP addresses. For clusters deployed in VPCs, enable **Public Access** on the **Settings > Network Access** tab to connect from addresses outside the peered network. When enabled, a public IP address is added to each region of the cluster. You can view the private and public host addresses under **Connection Parameters** on the cluster **Settings > Infrastructure** tab. @@ -325,13 +333,13 @@ On this page: ### December 12, 2022 -**New Features** +**New features** - [YugabyteDB Aeon REST API](https://yugabyte.stoplight.io/docs/managed-apis/) generally available. Use the REST API to deploy and manage your database clusters in YugabyteDB Aeon programmatically. ### November 28, 2022 -**New Features** +**New features** - Support for multi-region clusters with [geo-partitioning](../../explore/multi-region-deployments/row-level-geo-partitioning/) using the new [Partition by region](../cloud-basics/create-clusters-topology/#partition-by-region) deployment. Geo-partitioning allows you to move data closer to users to achieve lower latency and higher performance, and meet data residency requirements to comply with regulations such as GDPR. - Support for [read replicas](../cloud-basics/create-clusters-topology/#read-replicas). Use read replicas to lower latencies for read requests from remote regions. @@ -343,14 +351,14 @@ On this page: ### November 15, 2022 -**New Features** +**New features** - Ability to view cluster health. YugabyteDB Aeon monitors the health of your clusters based on cluster alert conditions and displays the health as either Healthy, Needs Attention, or Unhealthy. - Ability to set alerts for failed nodes. Get notified when the number of failed nodes exceeds the threshold. 
### November 4, 2022 -**New Features** +**New features** - Ability to reset slow queries for faster debugging of slow-running queries. - Ability to set a preferred region to tune the read and write latency for specific regions. Designating one region as preferred can reduce the number of network hops needed to process requests. The preferred region can be assigned during cluster creation, and set or changed after cluster creation. @@ -358,13 +366,13 @@ On this page: ### October 24, 2022 -**New Features** +**New features** - Support for role-based API keys. Assign [roles](../managed-security/managed-roles) to API keys; keys assigned a developer role can't be used to perform admin tasks. In addition, keys are no longer revoked if the user that created the key is deleted from the account. ### October 17, 2022 -**New Features** +**New features** - Ability to set alerts for cluster memory use and YSQL connections. Get notified when memory use or the number of YSQL connections in a cluster exceeds the threshold. High memory use or number of YSQL connections can indicate problems with your workload, such as unoptimized queries or problems with your application connection code. @@ -394,7 +402,7 @@ On this page: ### June 27, 2022 -**New Features** +**New features** - Performance Optimizer for scanning clusters for optimizations (preview release). Provides recommendations on index and schema improvements, and detects connection, query, and CPU skew to identify potentially hot nodes. - [YugabyteDB Aeon REST API](https://yugabyte.stoplight.io/docs/managed-apis/) (preview release). Use the REST API to deploy and manage your database clusters in YugabyteDB Aeon programmatically. @@ -402,7 +410,7 @@ On this page: ### June 22, 2022 -**New Features** +**New features** - Support for creating multi-region replicated clusters (preview release). Create clusters that are resilient to region-level outages, with data synchronously replicated across 3 regions. 
@@ -416,7 +424,7 @@ On this page: ### June 14, 2022 -**New Features** +**New features** - Support for social logins. Sign up and log in to YugabyteDB Aeon using your existing Google, LinkedIn, or GitHub account. Admin users can manage the available login methods from the **Authentication** tab on the **Security** page. @@ -441,7 +449,7 @@ On this page: ### March 31, 2022 -**New Features** +**New features** - Self-guided quickstart incorporated in Cloud Shell. Launch Cloud Shell using the YSQL API to begin a [self-guided tutorial](../cloud-quickstart/qs-explore/) exploring distributed SQL. @@ -451,14 +459,14 @@ On this page: ### March 10, 2022 -**New Features** +**New features** - Ability to schedule the maintenance window and exclusion periods for upcoming maintenance and database upgrades. The maintenance window is a weekly four-hour time slot during which Yugabyte may maintain or upgrade clusters. Yugabyte does not maintain or upgrade clusters outside the scheduled maintenance window, or during exclusion periods. Manage maintenance windows on the cluster **Maintenance** tab. - Ability to manually pause and resume clusters. To pause a cluster, select the cluster, click **Actions**, and choose **Pause Cluster**. Yugabyte suspends instance vCPU capacity charges for paused clusters; disk and backup storage are charged at the standard rate. ### February 3, 2022 -**New Features** +**New features** - Ability to select the [version](../../faq/yugabytedb-managed-faq/#what-version-of-yugabytedb-does-my-cluster-run-on) of YugabyteDB to install on a cluster when [creating Dedicated clusters](../cloud-basics/create-clusters/). - Automated notifications of upcoming database maintenance. The notification email includes the date and time of the maintenance window. An Upcoming Maintenance badge is also displayed on the cluster. 
Start an upgrade any time by signing in to YugabyteDB Aeon, selecting the cluster, clicking the **Upcoming Maintenance** badge, and clicking **Upgrade Now**. @@ -475,7 +483,7 @@ On this page: ### January 27, 2022 -**New Features** +**New features** - Support for [alerts](../cloud-monitor/cloud-alerts/) to notify you and your team members when cluster and database resource usage exceeds predefined limits, or of potential billing issues. Configure alerts and view notifications on the **Alerts** page. When an alert triggers, YugabyteDB Aeon sends an email notification and displays a notification on the **Notifications** tab. When the alert condition resolves, the notification dismisses automatically. Alerts are enabled for all clusters in your account. - Sandbox clusters are now [paused](../../faq/yugabytedb-managed-faq/#why-is-my-sandbox-cluster-paused) after 21 days of inactivity. YugabyteDB Aeon sends a notification when your cluster is paused. To keep a cluster from being paused, perform an action as described in [What qualifies as activity on a cluster?](../../faq/yugabytedb-managed-faq/#what-qualifies-as-activity-on-a-cluster) Sandbox clusters are deleted after 30 days of inactivity. @@ -487,7 +495,7 @@ On this page: ### December 16, 2021 -**New Features** +**New features** - Self service [Virtual Private Cloud (VPC) networking](../cloud-basics/cloud-vpcs/). Use VPC networks to lower network latencies and make your application and database infrastructure more secure. Create VPCs in AWS or GCP and peer them with application VPCs in the same cloud provider. VPC networking is managed on the **VPC Network** tab of the **Networking** page. - Ability to [enable pre-bundled extensions](../cloud-clusters/add-extensions/) using the `CREATE EXTENSION` command. YugabyteDB includes [pre-bundled PostgreSQL extensions](../../explore/ysql-language-features/pg-extensions/) that are tested to work with YSQL. 
Admin users now have additional permissions to allow them to enable these extensions in databases. (If you need to install a database extension that is not pre-bundled, contact {{% support-cloud %}} @@ -495,7 +503,7 @@ On this page: ### December 2, 2021 -**New Features** +**New features** - Additional [performance metrics](../cloud-monitor/overview/). The new cluster **Performance Metrics** tab features new metrics including YSQL and YCQL operations per second, YSQL and YCQL latency, network bytes per second, and more. Use these metrics to ensure the cluster configuration matches its performance requirements. - Ability to review running queries using the [Live Queries](../cloud-monitor/cloud-queries-live/) on the cluster **Performance** tab. Use this information to visually identify relevant database operations and evaluate query execution times. @@ -507,7 +515,7 @@ On this page: ### November 18, 2021 -**New Features** +**New features** - Support for auditing account activity using the new **Activity** tab on the **Security** page. The tab provides a running audit of activity, including: @@ -526,14 +534,14 @@ On this page: ### October 5, 2021 -**New Features** +**New features** - The [YugabyteDB Aeon Status](https://status.yugabyte.cloud/) page shows the current uptime status of YugabyteDB Aeon and the [Yugabyte Support Portal](https://support.yugabyte.com/), along with maintenance notices and incident reports. - Ability to review cluster activity using the new cluster **Activity** tab. ### September 15, 2021 -**New Features** +**New features** - Ability to [create clusters](../cloud-basics/create-clusters/) suitable for production workloads. YugabyteDB Aeon clusters support horizontal and vertical scaling, VPC peering, and scheduled and manual backups. - Billing support. Set up a billing profile, manage payment methods, and review invoices on the [Billing](../cloud-admin/cloud-billing-profile) tab. 
(You must create a billing profile and add a payment method before you can create any clusters apart from your Sandbox cluster.) From 70aa7d792702818f7964bff356ebff83e387bfb3 Mon Sep 17 00:00:00 2001 From: Dwight Hodge <79169168+ddhodge@users.noreply.github.com> Date: Tue, 17 Sep 2024 11:02:50 -0400 Subject: [PATCH 53/75] [doc][ybm] Tablet peer alert (#23942) * Tablet peer alert * edit * Update docs/content/preview/yugabyte-cloud/cloud-monitor/cloud-alerts.md Co-authored-by: Daniel Isen <54864769+daniel-yb@users.noreply.github.com> * new Azure regions --------- Co-authored-by: Daniel Isen <54864769+daniel-yb@users.noreply.github.com> --- docs/content/preview/architecture/yb-tserver.md | 4 ++-- .../cloud-basics/create-clusters-overview.md | 2 ++ .../yugabyte-cloud/cloud-monitor/cloud-alerts.md | 14 +++++++++++++- 3 files changed, 17 insertions(+), 3 deletions(-) diff --git a/docs/content/preview/architecture/yb-tserver.md b/docs/content/preview/architecture/yb-tserver.md index c874efe3b135..b7a49853990d 100644 --- a/docs/content/preview/architecture/yb-tserver.md +++ b/docs/content/preview/architecture/yb-tserver.md @@ -19,11 +19,11 @@ The YugabyteDB Tablet Server (YB-TServer) service is responsible for the input-o The following diagram depicts a basic four-node YugabyteDB universe, with one table that has 4 tablets and a replication factor of 3: -![Tserver overview](/images/architecture/tserver_overview.png) +![TServer overview](/images/architecture/tserver_overview.png) The tablet-peers corresponding to each tablet hosted on different YB-TServers form a Raft group and replicate data between each other. The system shown in the preceding diagram includes sixteen independent Raft groups. For more information, see [Replication layer](../docdb-replication/). -Within each YB-TServer, cross-tablet intelligence is employed to maximize resource efficiency. There are multiple ways the YB-TServer coordinates operations across the tablets it hosts. 
+In each YB-TServer, cross-tablet intelligence is employed to maximize resource efficiency. There are multiple ways the YB-TServer coordinates operations across the tablets it hosts. ## Server-global block cache diff --git a/docs/content/preview/yugabyte-cloud/cloud-basics/create-clusters-overview.md b/docs/content/preview/yugabyte-cloud/cloud-basics/create-clusters-overview.md index cf9b1a2e3826..c0924861e60e 100644 --- a/docs/content/preview/yugabyte-cloud/cloud-basics/create-clusters-overview.md +++ b/docs/content/preview/yugabyte-cloud/cloud-basics/create-clusters-overview.md @@ -289,9 +289,11 @@ The following **Azure regions** are available: - Seoul (koreacentral) - Ireland (northeurope) - Norway (norwayeast) +- Doha (qatarcentral) - Johannesburg (southafricanorth) - Texas (southcentralus) - Singapore (southeastasia) +- Stockholm (swedencentral) - Zurich (switzerlandnorth) - Dubai (uaenorth) - London (uksouth) diff --git a/docs/content/preview/yugabyte-cloud/cloud-monitor/cloud-alerts.md b/docs/content/preview/yugabyte-cloud/cloud-monitor/cloud-alerts.md index 5a1cbc771cb4..068be033dc53 100644 --- a/docs/content/preview/yugabyte-cloud/cloud-monitor/cloud-alerts.md +++ b/docs/content/preview/yugabyte-cloud/cloud-monitor/cloud-alerts.md @@ -53,7 +53,7 @@ YugabyteDB monitors the health of your clusters based on [cluster alert](#cluste | Status | Alert | Level | | :----- | :---- | :---- | | Healthy | No alerts
[Disk throughput](#fix-throughput-alerts)
[Disk IOPS](#fix-iops-alerts)
[Fewer than 34% of nodes down](#fix-nodes-reporting-as-down-alerts) |
Warning
Warning
Info | -| Needs Attention | [Node free storage](#fix-storage-alerts)
[More than 34% of nodes down](#fix-nodes-reporting-as-down-alerts)
[Memory Utilization](#fix-memory-alerts)
[YSQL Connections](#fix-ysql-connection-alerts)
[CPU Utilization](#fix-cpu-alerts) | Warning or Severe
Warning or Severe
Warning or Severe
Warning
Warning or Severe
Warning or Severe
Warning or Severe +| Needs Attention | [Tablet peers](#fix-tablet-peer-alerts)
[Node free storage](#fix-storage-alerts)
[More than 34% of nodes down](#fix-nodes-reporting-as-down-alerts)
[Memory Utilization](#fix-memory-alerts)
[YSQL Connections](#fix-ysql-connection-alerts)
[CPU Utilization](#fix-cpu-alerts) | Warning or Severe
Warning or Severe
Warning
Warning or Severe
Warning or Severe
Warning or Severe | | Unhealthy | [More than 66% of nodes down](#fix-nodes-reporting-as-down-alerts)
[CMK unavailable](#fix-cmk-unavailable-alerts) | Severe
Warning | To see the alert conditions that caused the current health condition, click the cluster health icon. @@ -74,6 +74,7 @@ When you receive a cluster alert, the first step is to review the chart for the | Alert | Metric | | :--- | :--- | +| [Tablet Peers](#fix-tablet-peer-alerts) | Tablets | | [Disk Throughput](#fix-throughput-alerts) | Disk IOPS | | [Disk IOPS](#fix-iops-alerts) | Disk IOPS | | [Node Free Storage](#fix-storage-alerts) | Disk Usage metric | @@ -93,6 +94,17 @@ If you get frequent cluster alerts on a [Sandbox cluster](../../cloud-basics/cre {{< /note >}} +#### Fix tablet peer alerts + +YugabyteDB Aeon sends a notification when the number of [tablet peers](../../../architecture/docdb-replication/replication/#tablet-peers) in the cluster exceeds the threshold, as follows: + +- Number of tablet peers is 85% of the cluster limit (Warning). +- Number of tablet peers is 100% of the cluster limit (Severe). + +If the number of tablet peers in the cluster approaches the limit for the cluster, consider scaling the cluster horizontally by adding nodes, or vertically by adding vCPUs. + +For information on scaling clusters, refer to [Scale and configure clusters](../../cloud-clusters/configure-clusters/). 
+ #### Fix throughput alerts YugabyteDB Aeon sends a notification when the disk throughput on any node in the cluster exceeds the threshold, as follows: From 2f70696bafc0a0cb426bb1f27221887ae47cb3a7 Mon Sep 17 00:00:00 2001 From: Dwight Hodge <79169168+ddhodge@users.noreply.github.com> Date: Tue, 17 Sep 2024 11:08:43 -0400 Subject: [PATCH 54/75] [doc] Smart driver clarification (#23933) * using smart driver clarification * review comment --- docs/content/preview/drivers-orms/smart-drivers.md | 8 ++++---- docs/content/preview/faq/smart-drivers-faq.md | 12 ++++++++++-- .../cloud-connect/connect-applications.md | 2 +- 3 files changed, 15 insertions(+), 7 deletions(-) diff --git a/docs/content/preview/drivers-orms/smart-drivers.md b/docs/content/preview/drivers-orms/smart-drivers.md index 8da48c694442..3e0cd891b5cd 100644 --- a/docs/content/preview/drivers-orms/smart-drivers.md +++ b/docs/content/preview/drivers-orms/smart-drivers.md @@ -186,17 +186,17 @@ YugabyteDB Aeon clusters also support topology-aware load balancing. If the clus ### Deploying applications -To take advantage of smart driver load balancing features when connecting to clusters in YugabyteDB Aeon, applications using smart drivers must be deployed in a VPC that has been peered with the cluster VPC. For information on VPC peering in YugabyteDB Aeon, refer to [VPC network](../../yugabyte-cloud/cloud-basics/cloud-vpcs/). +To take advantage of smart driver load balancing features when connecting to clusters in YugabyteDB Aeon, applications using smart drivers must be deployed in a VPC that has been [peered with the cluster VPC](../../yugabyte-cloud/cloud-basics/cloud-vpcs/cloud-add-peering/). For applications that access the cluster from outside the peered network or using private endpoints via a private link, set the load balance connection parameter to `false`; in this case, the cluster performs the load balancing. 
-Applications that use smart drivers from outside the peered network fall back to the upstream driver behavior automatically. You may see a warning similar to the following: +Applications that use smart drivers from outside the peered network with load balance on will try to connect to the inaccessible nodes before falling back to the upstream driver behavior. You may see a warning similar to the following: ```output WARNING [com.yug.Driver] (agroal-11) Failed to apply load balance. Trying normal connection ``` -This indicates that the smart driver was unable to perform smart load balancing, and will fall back to the upstream behavior. +This indicates that the smart driver was unable to perform smart load balancing. To avoid the added latency incurred, turn load balance off. -For applications that access the cluster from outside the peered network or using private endpoints via a private link, use the upstream PostgreSQL driver instead; in this case, the cluster performs the load balancing. +For information on VPC peering in YugabyteDB Aeon, refer to [VPC network](../../yugabyte-cloud/cloud-basics/cloud-vpcs/). ### SSL/TLS verify-full support diff --git a/docs/content/preview/faq/smart-drivers-faq.md b/docs/content/preview/faq/smart-drivers-faq.md index 18395027af30..89fb288aa126 100644 --- a/docs/content/preview/faq/smart-drivers-faq.md +++ b/docs/content/preview/faq/smart-drivers-faq.md @@ -52,9 +52,17 @@ Topology-aware load balancing further achieves lower latencies by enabling appli ### When should I use a smart driver? -**YugabyteDB** - Use a smart driver if all the nodes in the cluster are available for direct connectivity from the location where the client application is running. +- YugabyteDB - Use a smart driver if all the nodes in the cluster are available for direct connectivity from the location where the client application is running. For example, if the VPC hosting YugabyteDB is peered with the VPC hosting the application. 
-**YugabyteDB Aeon** - Use a smart driver if your client application is running in a peered VPC. Without a smart driver, YugabyteDB Aeon falls back to the connection load balancing provided by cloud providers; however you lose many of the advantages of cluster- and topology-awareness provided by the smart drivers. +- YugabyteDB Aeon - Use a smart driver if your client application is running in a peered VPC. Without a smart driver, YugabyteDB Aeon falls back to the connection load balancing provided by cloud providers; however you lose many of the advantages of cluster- and topology-awareness provided by the smart drivers. + +If the external address given in the connection URL and individual nodes are not accessible directly, do not enable smart driver load balancing. Applications that use smart drivers from outside the peered network with load balance on will try to connect to the inaccessible nodes before falling back to the upstream driver behavior. You may see a warning similar to the following: + +```output +WARNING [com.yug.Driver] (agroal-11) Failed to apply load balance. Trying normal connection +``` + +This indicates that the smart driver was unable to perform smart load balancing. To avoid the added latency incurred, turn load balance off or use the upstream driver. ### How hard is it to port an application to use a smart driver? 
diff --git a/docs/content/preview/yugabyte-cloud/cloud-connect/connect-applications.md b/docs/content/preview/yugabyte-cloud/cloud-connect/connect-applications.md index 4c3e65551d64..96e78b5f7649 100644 --- a/docs/content/preview/yugabyte-cloud/cloud-connect/connect-applications.md +++ b/docs/content/preview/yugabyte-cloud/cloud-connect/connect-applications.md @@ -46,7 +46,7 @@ Clusters deployed in VPCs don't expose public IP addresses unless you explicitly #### Using smart drivers -To take advantage of smart driver load balancing features when connecting to clusters in YugabyteDB Aeon, applications using smart drivers _must_ be deployed in a VPC that has been peered with the cluster VPC. If not deployed in a peered VPC, the smart driver falls back to the upstream driver behavior. For more information on smart drivers and using smart drivers with YugabyteDB Aeon, refer to [YugabyteDB smart drivers for YSQL](../../../drivers-orms/smart-drivers/). +To take advantage of smart driver load balancing features when connecting to clusters in YugabyteDB Aeon, applications using smart drivers _must_ be deployed in a VPC that has been peered with the cluster VPC. If not deployed in a peered VPC, although the smart driver falls back to the upstream driver behavior, it first attempts to connect to the inaccessible nodes, incurring added latency. For more information on smart drivers and using smart drivers with YugabyteDB Aeon, refer to [YugabyteDB smart drivers for YSQL](../../../drivers-orms/smart-drivers/). 
### Cluster certificate From 1525ced9cdb47e27337184cdc1c9ef1d3f437591 Mon Sep 17 00:00:00 2001 From: Aishwarya Chakravarthy Date: Tue, 17 Sep 2024 11:10:42 -0400 Subject: [PATCH 55/75] [docs] fix for a yb version not rendering (#23944) --- docs/content/preview/quick-start/include-connect.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/content/preview/quick-start/include-connect.md b/docs/content/preview/quick-start/include-connect.md index 11ddba32b572..794cafe0f127 100644 --- a/docs/content/preview/quick-start/include-connect.md +++ b/docs/content/preview/quick-start/include-connect.md @@ -43,7 +43,7 @@ To open the YSQL shell, run `ysqlsh`: ``` ```output -ysqlsh (11.2-YB-{{}}-b0) +ysqlsh (11.2-YB-2.23.0.0-b0) Type "help" for help. yugabyte=# From e9f3ec275a6d6420608f084e0c90d65608e4b1f7 Mon Sep 17 00:00:00 2001 From: Bvsk Patnaik Date: Mon, 16 Sep 2024 18:05:30 -0700 Subject: [PATCH 56/75] [#23843] YSQL: Fix flaky test testSchemaMismatchRetry in TestPgBatch Summary: The test TestPgBatch#testSchemaMismatchRetry (introduced in commit dd60793542) relies on a tserver node not receiving a heartbeat in the time period between ALTER table and the next UPDATE statement. The timing on the next heartbeat is uncertain and especially tight given the low interval in tests. Error resembles the following form ``` Internal retries are not supported in batched execution mode: expected java.sql.BatchUpdateException to be thrown, but nothing was thrown ``` Disable heartbeats before issuing the alter command so that we can reliably trigger schema version mismatch error. Jira: DB-12746 Test Plan: Jenkins: test regex: .*testSchemaMismatchRetry.* Local testing to detect any flakiness. 
``` macos# ./yb_build.sh release --java-test TestPgBatch#testSchemaMismatchRetry -n 50 alma8# ./yb_build.sh asan --java-test TestPgBatch#testSchemaMismatchRetry -n 50 alma8# ./yb_build.sh tsan --java-test TestPgBatch#testSchemaMismatchRetry -n 50 ``` Backport-through: 2.20 Reviewers: tfoucher, smishra Reviewed By: tfoucher Differential Revision: https://phorge.dev.yugabyte.com/D38111 --- java/yb-pgsql/src/test/java/org/yb/pgsql/TestPgBatch.java | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/java/yb-pgsql/src/test/java/org/yb/pgsql/TestPgBatch.java b/java/yb-pgsql/src/test/java/org/yb/pgsql/TestPgBatch.java index d17191fc70d5..1d0c8295ba2f 100644 --- a/java/yb-pgsql/src/test/java/org/yb/pgsql/TestPgBatch.java +++ b/java/yb-pgsql/src/test/java/org/yb/pgsql/TestPgBatch.java @@ -20,6 +20,7 @@ import static org.yb.AssertionWrappers.assertThrows; import static org.yb.AssertionWrappers.assertTrue; +import com.google.common.net.HostAndPort; import com.yugabyte.util.PSQLException; import java.sql.BatchUpdateException; import java.sql.Connection; @@ -162,6 +163,11 @@ public void testSchemaMismatchRetry() throws Throwable { for (int i = 1; i <= 2; i++) { s1.addBatch(String.format("UPDATE t SET v=2 WHERE k=%d", i)); } + // Disable heartbeats so that catalog version is not propagated. + for (HostAndPort hp : miniCluster.getTabletServers().keySet()) { + assertTrue(miniCluster.getClient().setFlag( + hp, "TEST_tserver_disable_heartbeat", "true", true)); + } // Causes a schema version mismatch error on the next UPDATE statement. // Execute ALTER in a different session c2 so as not to invalidate // the catalog cache of c1 until the next heartbeat with the master. 
From 89b69cfc010fc5c0ef31ce968026d1f42b9a0eae Mon Sep 17 00:00:00 2001 From: Timothy Elgersma Date: Tue, 17 Sep 2024 12:41:17 +0000 Subject: [PATCH 57/75] [#23943]: YSQL: Fix Bitmap Scan crash in fastdebug GCC11 Summary: Intermittently, in fastdebug compiled with gcc11 (no repro on other build types, or on fastdebug with clang), bitmap scan queries with a Bitmap And crash. The crash is caused by a segmentation fault while removing an item from the bitmap set: ```lang=c++ // for each elem in a, if it's not also in b, delete a's copy auto iterb = b->begin(); for (auto itera = a->begin(); itera != a->end();) { if ((iterb = b->find(*itera)) == b->end()) { FreeSlice(*itera); itera = a->erase(itera); // <----- crashes here inside the erase call. } else { ++itera; } } ``` Moving the `FreeSlice` line to after removing the item from the list allows it to complete successfully, with the same behaviour as before. My guess is that gcc11 tries to invoke the destructor of the slice when removing it from the set, and since the slice is freed but not set to NULL, it points to some garbage value that occasionally results in a crash. Test Plan: ```lang=sj ./yb_build.sh fastdebug --java-test 'org.yb.pgsql.TestPgRegressYbBitmapScans' --gcc11 ``` Or, manually: 1. Build: ``` ./yb_build.sh fastdebug --gcc11 ``` 2. Restart cluster 3. Run ```lang=sql /*+ Set(yb_enable_bitmapscan true) Set(yb_enable_base_scans_cost_model true) BitmapScan(t) */ EXPLAIN (ANALYZE, SUMMARY OFF, COSTS OFF) SELECT * FROM test_and t WHERE a < 5 AND b < 5; \watch .1 ``` Before this change, it would crash usually on the first or second invocation. With this change, it runs successfully for at least a few minutes. 
Reviewers: amartsinchyk Reviewed By: amartsinchyk Subscribers: yql Differential Revision: https://phorge.dev.yugabyte.com/D38080 --- src/yb/yql/pggate/ybc_pggate.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/src/yb/yql/pggate/ybc_pggate.cc b/src/yb/yql/pggate/ybc_pggate.cc index b357248e92f2..acaf3f79e77e 100644 --- a/src/yb/yql/pggate/ybc_pggate.cc +++ b/src/yb/yql/pggate/ybc_pggate.cc @@ -646,6 +646,7 @@ typedef std::unordered_set UnorderedSliceSet; static void FreeSlice(Slice slice) { delete[] slice.data(), slice.size(); + slice.Clear(); } SliceSet YBCBitmapCreateSet() { From 27446e26683667e6db947d0fc3c52472e06dc14a Mon Sep 17 00:00:00 2001 From: Aishwarya Chakravarthy Date: Tue, 17 Sep 2024 13:25:20 -0400 Subject: [PATCH 58/75] [DOC-470] Include SSL Connectivity within the source database tabs. (#23878) * formatting changes and tab placements * format --------- Co-authored-by: Dwight Hodge --- .../migrate/live-fall-back.md | 682 +++++++++--------- .../migrate/live-fall-forward.md | 20 +- .../yugabyte-voyager/migrate/live-migrate.md | 20 +- 3 files changed, 364 insertions(+), 358 deletions(-) diff --git a/docs/content/preview/yugabyte-voyager/migrate/live-fall-back.md b/docs/content/preview/yugabyte-voyager/migrate/live-fall-back.md index fba8b70193c6..5db054a57d86 100644 --- a/docs/content/preview/yugabyte-voyager/migrate/live-fall-back.md +++ b/docs/content/preview/yugabyte-voyager/migrate/live-fall-back.md @@ -97,101 +97,101 @@ Create a new database user, and assign the necessary user permissions. {{% tab header="Standalone Oracle Container Database" %}} - 1. 
Ensure that your database log_mode is `archivelog` as follows: - - ```sql - SELECT LOG_MODE FROM V$DATABASE; - ``` - - ```output - LOG_MODE - ------------ - ARCHIVELOG - ``` - - If log_mode is NOARCHIVELOG (that is, not enabled), run the following command: - - ```sql - sqlplus /nolog - SQL>alter system set db_recovery_file_dest_size = 10G; - SQL>alter system set db_recovery_file_dest = '/oradata/recovery_area' scope=spfile; - SQL> connect / as sysdba - SQL> Shutdown immediate - SQL> Startup mount - SQL> Alter database archivelog; - SQL> Alter database open; - ``` - - 1. Create the tablespaces as follows: - - 1. Connect to Pluggable database (PDB) as sysdba and run the following command: - - ```sql - CREATE TABLESPACE logminer_tbs DATAFILE '/opt/oracle/oradata/ORCLCDB/ORCLPDB1/logminer_tbs.dbf' - SIZE 25M REUSE AUTOEXTEND ON MAXSIZE UNLIMITED; - ``` - - 1. Connect to Container database (CDB) as sysdba and run the following command: - - ```sql - CREATE TABLESPACE logminer_tbs DATAFILE '/opt/oracle/oradata/ORCLCDB/logminer_tbs.dbf' - SIZE 25M REUSE AUTOEXTEND ON MAXSIZE UNLIMITED; - ``` - - 1. 
Run the following commands from CDB as sysdba: - - ```sql - CREATE USER c##ybvoyager IDENTIFIED BY password - DEFAULT TABLESPACE logminer_tbs - QUOTA UNLIMITED ON logminer_tbs - CONTAINER=ALL; - - GRANT CREATE SESSION TO c##ybvoyager CONTAINER=ALL; - GRANT SET CONTAINER TO c##ybvoyager CONTAINER=ALL; - GRANT SELECT ON V_$DATABASE to c##ybvoyager CONTAINER=ALL; - GRANT FLASHBACK ANY TABLE TO c##ybvoyager CONTAINER=ALL; - GRANT SELECT ANY TABLE TO c##ybvoyager CONTAINER=ALL; - GRANT SELECT_CATALOG_ROLE TO c##ybvoyager CONTAINER=ALL; - GRANT EXECUTE_CATALOG_ROLE TO c##ybvoyager CONTAINER=ALL; - GRANT SELECT ANY TRANSACTION TO c##ybvoyager CONTAINER=ALL; - GRANT LOGMINING TO c##ybvoyager CONTAINER=ALL; - - GRANT CREATE TABLE TO c##ybvoyager CONTAINER=ALL; - GRANT LOCK ANY TABLE TO c##ybvoyager CONTAINER=ALL; - GRANT CREATE SEQUENCE TO c##ybvoyager CONTAINER=ALL; - - GRANT EXECUTE ON DBMS_LOGMNR TO c##ybvoyager CONTAINER=ALL; - GRANT EXECUTE ON DBMS_LOGMNR_D TO c##ybvoyager CONTAINER=ALL; - - GRANT SELECT ON V_$LOG TO c##ybvoyager CONTAINER=ALL; - GRANT SELECT ON V_$LOG_HISTORY TO c##ybvoyager CONTAINER=ALL; - GRANT SELECT ON V_$LOGMNR_LOGS TO c##ybvoyager CONTAINER=ALL; - GRANT SELECT ON V_$LOGMNR_CONTENTS TO c##ybvoyager CONTAINER=ALL; - GRANT SELECT ON V_$LOGMNR_PARAMETERS TO c##ybvoyager CONTAINER=ALL; - GRANT SELECT ON V_$LOGFILE TO c##ybvoyager CONTAINER=ALL; - GRANT SELECT ON V_$ARCHIVED_LOG TO c##ybvoyager CONTAINER=ALL; - GRANT SELECT ON V_$ARCHIVE_DEST_STATUS TO c##ybvoyager CONTAINER=ALL; - GRANT SELECT ON V_$TRANSACTION TO c##ybvoyager CONTAINER=ALL; - - GRANT SELECT ON V_$MYSTAT TO c##ybvoyager CONTAINER=ALL; - GRANT SELECT ON V_$STATNAME TO c##ybvoyager CONTAINER=ALL; - ``` - - 1. Enable supplemental logging in the database as follows: - - ```sql - ALTER DATABASE ADD SUPPLEMENTAL LOG DATA; - ALTER DATABASE ADD SUPPLEMENTAL LOG DATA (ALL) COLUMNS; - ``` - - 1. 
Create `ybvoyager_metadata` schema or user, and tables for voyager to use during migration as follows: +1. Ensure that your database log_mode is `archivelog` as follows: - ```sql - CREATE USER ybvoyager_metadata IDENTIFIED BY "password"; - GRANT CONNECT, RESOURCE TO ybvoyager_metadata; - ALTER USER ybvoyager_metadata QUOTA UNLIMITED ON USERS; + ```sql + SELECT LOG_MODE FROM V$DATABASE; + ``` + + ```output + LOG_MODE + ------------ + ARCHIVELOG + ``` + + If log_mode is NOARCHIVELOG (that is, not enabled), run the following command: + + ```sql + sqlplus /nolog + SQL>alter system set db_recovery_file_dest_size = 10G; + SQL>alter system set db_recovery_file_dest = '/oradata/recovery_area' scope=spfile; + SQL> connect / as sysdba + SQL> Shutdown immediate + SQL> Startup mount + SQL> Alter database archivelog; + SQL> Alter database open; + ``` + +1. Create the tablespaces as follows: + + 1. Connect to Pluggable database (PDB) as sysdba and run the following command: + + ```sql + CREATE TABLESPACE logminer_tbs DATAFILE '/opt/oracle/oradata/ORCLCDB/ORCLPDB1/logminer_tbs.dbf' + SIZE 25M REUSE AUTOEXTEND ON MAXSIZE UNLIMITED; + ``` + + 1. Connect to Container database (CDB) as sysdba and run the following command: + + ```sql + CREATE TABLESPACE logminer_tbs DATAFILE '/opt/oracle/oradata/ORCLCDB/logminer_tbs.dbf' + SIZE 25M REUSE AUTOEXTEND ON MAXSIZE UNLIMITED; + ``` + +1. 
Run the following commands from CDB as sysdba: + + ```sql + CREATE USER c##ybvoyager IDENTIFIED BY password + DEFAULT TABLESPACE logminer_tbs + QUOTA UNLIMITED ON logminer_tbs + CONTAINER=ALL; + + GRANT CREATE SESSION TO c##ybvoyager CONTAINER=ALL; + GRANT SET CONTAINER TO c##ybvoyager CONTAINER=ALL; + GRANT SELECT ON V_$DATABASE to c##ybvoyager CONTAINER=ALL; + GRANT FLASHBACK ANY TABLE TO c##ybvoyager CONTAINER=ALL; + GRANT SELECT ANY TABLE TO c##ybvoyager CONTAINER=ALL; + GRANT SELECT_CATALOG_ROLE TO c##ybvoyager CONTAINER=ALL; + GRANT EXECUTE_CATALOG_ROLE TO c##ybvoyager CONTAINER=ALL; + GRANT SELECT ANY TRANSACTION TO c##ybvoyager CONTAINER=ALL; + GRANT LOGMINING TO c##ybvoyager CONTAINER=ALL; + + GRANT CREATE TABLE TO c##ybvoyager CONTAINER=ALL; + GRANT LOCK ANY TABLE TO c##ybvoyager CONTAINER=ALL; + GRANT CREATE SEQUENCE TO c##ybvoyager CONTAINER=ALL; + + GRANT EXECUTE ON DBMS_LOGMNR TO c##ybvoyager CONTAINER=ALL; + GRANT EXECUTE ON DBMS_LOGMNR_D TO c##ybvoyager CONTAINER=ALL; + + GRANT SELECT ON V_$LOG TO c##ybvoyager CONTAINER=ALL; + GRANT SELECT ON V_$LOG_HISTORY TO c##ybvoyager CONTAINER=ALL; + GRANT SELECT ON V_$LOGMNR_LOGS TO c##ybvoyager CONTAINER=ALL; + GRANT SELECT ON V_$LOGMNR_CONTENTS TO c##ybvoyager CONTAINER=ALL; + GRANT SELECT ON V_$LOGMNR_PARAMETERS TO c##ybvoyager CONTAINER=ALL; + GRANT SELECT ON V_$LOGFILE TO c##ybvoyager CONTAINER=ALL; + GRANT SELECT ON V_$ARCHIVED_LOG TO c##ybvoyager CONTAINER=ALL; + GRANT SELECT ON V_$ARCHIVE_DEST_STATUS TO c##ybvoyager CONTAINER=ALL; + GRANT SELECT ON V_$TRANSACTION TO c##ybvoyager CONTAINER=ALL; + + GRANT SELECT ON V_$MYSTAT TO c##ybvoyager CONTAINER=ALL; + GRANT SELECT ON V_$STATNAME TO c##ybvoyager CONTAINER=ALL; + ``` - CREATE TABLE ybvoyager_metadata.ybvoyager_import_data_event_channels_metainfo ( +1. Enable supplemental logging in the database as follows: + + ```sql + ALTER DATABASE ADD SUPPLEMENTAL LOG DATA; + ALTER DATABASE ADD SUPPLEMENTAL LOG DATA (ALL) COLUMNS; + ``` + +1. 
Create `ybvoyager_metadata` schema or user, and tables for voyager to use during migration as follows: + + ```sql + CREATE USER ybvoyager_metadata IDENTIFIED BY "password"; + GRANT CONNECT, RESOURCE TO ybvoyager_metadata; + ALTER USER ybvoyager_metadata QUOTA UNLIMITED ON USERS; + + CREATE TABLE ybvoyager_metadata.ybvoyager_import_data_event_channels_metainfo ( migration_uuid VARCHAR2(36), channel_no INT, last_applied_vsn NUMBER(19), @@ -201,7 +201,7 @@ Create a new database user, and assign the necessary user permissions. PRIMARY KEY (migration_uuid, channel_no) ); - CREATE TABLE ybvoyager_metadata.ybvoyager_imported_event_count_by_table ( + CREATE TABLE ybvoyager_metadata.ybvoyager_imported_event_count_by_table ( migration_uuid VARCHAR2(36), table_name VARCHAR2(250), channel_no INT, @@ -211,277 +211,287 @@ Create a new database user, and assign the necessary user permissions. num_deletes NUMBER(19), PRIMARY KEY (migration_uuid, table_name, channel_no) ); - ``` + ``` 1. Create a writer role for the source schema for Voyager to be able to write the changes from the target YugabyteDB database to the source database (in case of a fall-back): - ```sql - CREATE ROLE _writer_role; - - BEGIN - FOR R IN (SELECT owner, object_name FROM all_objects WHERE owner=UPPER('') and object_type ='TABLE' MINUS SELECT owner, table_name from all_nested_tables where owner = UPPER('')) - LOOP - EXECUTE IMMEDIATE 'GRANT SELECT, INSERT, UPDATE, DELETE, ALTER on '||R.owner||'."'||R.object_name||'" to _writer_role'; - END LOOP; - END; - / - - DECLARE - v_sql VARCHAR2(4000); - BEGIN - FOR table_rec IN (SELECT table_name FROM all_tables WHERE owner = 'YBVOYAGER_METADATA') LOOP - v_sql := 'GRANT ALL PRIVILEGES ON YBVOYAGER_METADATA.' 
|| table_rec.table_name || ' TO _writer_role'; - EXECUTE IMMEDIATE v_sql; - END LOOP; - END; - / + ```sql + CREATE ROLE _writer_role; - GRANT CREATE ANY SEQUENCE, SELECT ANY SEQUENCE, ALTER ANY SEQUENCE TO _writer_role; - ``` + BEGIN + FOR R IN (SELECT owner, object_name FROM all_objects WHERE owner=UPPER('') and object_type ='TABLE' MINUS SELECT owner, table_name from all_nested_tables where owner = UPPER('')) + LOOP + EXECUTE IMMEDIATE 'GRANT SELECT, INSERT, UPDATE, DELETE, ALTER on '||R.owner||'."'||R.object_name||'" to _writer_role'; + END LOOP; + END; + / + + DECLARE + v_sql VARCHAR2(4000); + BEGIN + FOR table_rec IN (SELECT table_name FROM all_tables WHERE owner = 'YBVOYAGER_METADATA') LOOP + v_sql := 'GRANT ALL PRIVILEGES ON YBVOYAGER_METADATA.' || table_rec.table_name || ' TO _writer_role'; + EXECUTE IMMEDIATE v_sql; + END LOOP; + END; + / + + GRANT CREATE ANY SEQUENCE, SELECT ANY SEQUENCE, ALTER ANY SEQUENCE TO _writer_role; + ``` 1. Assign the writer role to the source database user as follows: - ```sql - GRANT _writer_role TO c##ybvoyager; - ``` + ```sql + GRANT _writer_role TO c##ybvoyager; + ``` {{% /tab %}} {{% tab header="RDS Oracle" %}} - 1. Ensure that your database log_mode is `archivelog` as follows: +1. Ensure that your database log_mode is `archivelog` as follows: - ```sql - SELECT LOG_MODE FROM V$DATABASE; - ``` + ```sql + SELECT LOG_MODE FROM V$DATABASE; + ``` - ```output - LOG_MODE - ------------ - ARCHIVELOG - ``` + ```output + LOG_MODE + ------------ + ARCHIVELOG + ``` - If log_mode is NOARCHIVELOG (that is, not enabled), run the following command: + If log_mode is NOARCHIVELOG (that is, not enabled), run the following command: - ```sql - exec rdsadmin.rdsadmin_util.set_configuration('archivelog retention hours',24); - ``` + ```sql + exec rdsadmin.rdsadmin_util.set_configuration('archivelog retention hours',24); + ``` - 1. Connect to your database as an admin user, and create the tablespaces as follows: +1. 
Connect to your database as an admin user, and create the tablespaces as follows: - ```sql - CREATE TABLESPACE logminer_tbs DATAFILE SIZE 25M AUTOEXTEND ON MAXSIZE UNLIMITED; - ``` + ```sql + CREATE TABLESPACE logminer_tbs DATAFILE SIZE 25M AUTOEXTEND ON MAXSIZE UNLIMITED; + ``` - 1. Run the following commands connected to the admin or privileged user: +1. Run the following commands connected to the admin or privileged user: + + ```sql + CREATE USER ybvoyager IDENTIFIED BY password + DEFAULT TABLESPACE logminer_tbs + QUOTA UNLIMITED ON logminer_tbs; + + GRANT CREATE SESSION TO YBVOYAGER; + begin rdsadmin.rdsadmin_util.grant_sys_object( + p_obj_name => 'V_$DATABASE', + p_grantee => 'YBVOYAGER', + p_privilege => 'SELECT'); + end; + / + + GRANT FLASHBACK ANY TABLE TO YBVOYAGER; + GRANT SELECT ANY TABLE TO YBVOYAGER; + GRANT SELECT_CATALOG_ROLE TO YBVOYAGER; + GRANT EXECUTE_CATALOG_ROLE TO YBVOYAGER; + GRANT SELECT ANY TRANSACTION TO YBVOYAGER; + GRANT LOGMINING TO YBVOYAGER; + + GRANT CREATE TABLE TO YBVOYAGER; + GRANT LOCK ANY TABLE TO YBVOYAGER; + GRANT CREATE SEQUENCE TO YBVOYAGER; + + + begin rdsadmin.rdsadmin_util.grant_sys_object( + p_obj_name => 'DBMS_LOGMNR', + p_grantee => 'YBVOYAGER', + p_privilege => 'EXECUTE', + p_grant_option => true); + end; + / + + begin rdsadmin.rdsadmin_util.grant_sys_object( + p_obj_name => 'DBMS_LOGMNR_D', + p_grantee => 'YBVOYAGER', + p_privilege => 'EXECUTE', + p_grant_option => true); + end; + / - ```sql - CREATE USER ybvoyager IDENTIFIED BY password - DEFAULT TABLESPACE logminer_tbs - QUOTA UNLIMITED ON logminer_tbs; + begin rdsadmin.rdsadmin_util.grant_sys_object( + p_obj_name => 'V_$LOG', + p_grantee => 'YBVOYAGER', + p_privilege => 'SELECT'); + end; + / - GRANT CREATE SESSION TO YBVOYAGER; - begin rdsadmin.rdsadmin_util.grant_sys_object( - p_obj_name => 'V_$DATABASE', + begin + rdsadmin.rdsadmin_util.grant_sys_object( + p_obj_name => 'V_$LOG_HISTORY', p_grantee => 'YBVOYAGER', p_privilege => 'SELECT'); - end; - / - - GRANT 
FLASHBACK ANY TABLE TO YBVOYAGER; - GRANT SELECT ANY TABLE TO YBVOYAGER; - GRANT SELECT_CATALOG_ROLE TO YBVOYAGER; - GRANT EXECUTE_CATALOG_ROLE TO YBVOYAGER; - GRANT SELECT ANY TRANSACTION TO YBVOYAGER; - GRANT LOGMINING TO YBVOYAGER; - - GRANT CREATE TABLE TO YBVOYAGER; - GRANT LOCK ANY TABLE TO YBVOYAGER; - GRANT CREATE SEQUENCE TO YBVOYAGER; - - - begin rdsadmin.rdsadmin_util.grant_sys_object( - p_obj_name => 'DBMS_LOGMNR', - p_grantee => 'YBVOYAGER', - p_privilege => 'EXECUTE', - p_grant_option => true); - end; - / - - begin rdsadmin.rdsadmin_util.grant_sys_object( - p_obj_name => 'DBMS_LOGMNR_D', - p_grantee => 'YBVOYAGER', - p_privilege => 'EXECUTE', - p_grant_option => true); - end; - / - - begin rdsadmin.rdsadmin_util.grant_sys_object( - p_obj_name => 'V_$LOG', + end; + / + + begin + rdsadmin.rdsadmin_util.grant_sys_object( + p_obj_name => 'V_$LOGMNR_LOGS', p_grantee => 'YBVOYAGER', p_privilege => 'SELECT'); - end; - / - - begin - rdsadmin.rdsadmin_util.grant_sys_object( - p_obj_name => 'V_$LOG_HISTORY', - p_grantee => 'YBVOYAGER', - p_privilege => 'SELECT'); - end; - / - - begin - rdsadmin.rdsadmin_util.grant_sys_object( - p_obj_name => 'V_$LOGMNR_LOGS', - p_grantee => 'YBVOYAGER', - p_privilege => 'SELECT'); - end; - / - - begin - rdsadmin.rdsadmin_util.grant_sys_object( - p_obj_name => 'V_$LOGMNR_CONTENTS', - p_grantee => 'YBVOYAGER', - p_privilege => 'SELECT'); - end; - / - - begin - rdsadmin.rdsadmin_util.grant_sys_object( - p_obj_name => 'V_$LOGMNR_PARAMETERS', - p_grantee => 'YBVOYAGER', - p_privilege => 'SELECT'); - end; - / - - begin - rdsadmin.rdsadmin_util.grant_sys_object( - p_obj_name => 'V_$LOGFILE', - p_grantee => 'YBVOYAGER', - p_privilege => 'SELECT'); - end; - / - - begin - rdsadmin.rdsadmin_util.grant_sys_object( - p_obj_name => 'V_$ARCHIVED_LOG', - p_grantee => 'YBVOYAGER', - p_privilege => 'SELECT'); - end; - / - - begin - rdsadmin.rdsadmin_util.grant_sys_object( - p_obj_name => 'V_$ARCHIVE_DEST_STATUS', - p_grantee => 'YBVOYAGER', - 
p_privilege => 'SELECT'); - end; - / - - begin - rdsadmin.rdsadmin_util.grant_sys_object( - p_obj_name => 'V_$TRANSACTION', - p_grantee => 'YBVOYAGER', - p_privilege => 'SELECT'); - end; - / - - begin - rdsadmin.rdsadmin_util.grant_sys_object( - p_obj_name => 'V_$MYSTAT', - p_grantee => 'YBVOYAGER', - p_privilege => 'SELECT'); - end; - / - - begin - rdsadmin.rdsadmin_util.grant_sys_object( - p_obj_name => 'V_$STATNAME', - p_grantee => 'YBVOYAGER', - p_privilege => 'SELECT'); - end; - / - ``` - - 1. Enable supplemental logging in the database as follows: - - ```sql - exec rdsadmin.rdsadmin_util.alter_supplemental_logging('ADD'); - - begin - rdsadmin.rdsadmin_util.alter_supplemental_logging( - p_action => 'ADD', - p_type => 'ALL'); - end; - / - ``` - - 1. Create `ybvoyager_metadata` schema or user, and tables for voyager to use during migration as follows: + end; + / - ```sql - CREATE USER ybvoyager_metadata IDENTIFIED BY "password"; - GRANT CONNECT, RESOURCE TO ybvoyager_metadata; - ALTER USER ybvoyager_metadata QUOTA UNLIMITED ON USERS; + begin + rdsadmin.rdsadmin_util.grant_sys_object( + p_obj_name => 'V_$LOGMNR_CONTENTS', + p_grantee => 'YBVOYAGER', + p_privilege => 'SELECT'); + end; + / - CREATE TABLE ybvoyager_metadata.ybvoyager_import_data_event_channels_metainfo ( - migration_uuid VARCHAR2(36), - channel_no INT, - last_applied_vsn NUMBER(19), - num_inserts NUMBER(19), - num_updates NUMBER(19), - num_deletes NUMBER(19), - PRIMARY KEY (migration_uuid, channel_no) - ); + begin + rdsadmin.rdsadmin_util.grant_sys_object( + p_obj_name => 'V_$LOGMNR_PARAMETERS', + p_grantee => 'YBVOYAGER', + p_privilege => 'SELECT'); + end; + / - CREATE TABLE ybvoyager_metadata.ybvoyager_imported_event_count_by_table ( - migration_uuid VARCHAR2(36), - table_name VARCHAR2(250), - channel_no INT, - total_events NUMBER(19), - num_inserts NUMBER(19), - num_updates NUMBER(19), - num_deletes NUMBER(19), - PRIMARY KEY (migration_uuid, table_name, channel_no) - ); - ``` + begin + 
rdsadmin.rdsadmin_util.grant_sys_object( + p_obj_name => 'V_$LOGFILE', + p_grantee => 'YBVOYAGER', + p_privilege => 'SELECT'); + end; + / + + begin + rdsadmin.rdsadmin_util.grant_sys_object( + p_obj_name => 'V_$ARCHIVED_LOG', + p_grantee => 'YBVOYAGER', + p_privilege => 'SELECT'); + end; + / + + begin + rdsadmin.rdsadmin_util.grant_sys_object( + p_obj_name => 'V_$ARCHIVE_DEST_STATUS', + p_grantee => 'YBVOYAGER', + p_privilege => 'SELECT'); + end; + / + + begin + rdsadmin.rdsadmin_util.grant_sys_object( + p_obj_name => 'V_$TRANSACTION', + p_grantee => 'YBVOYAGER', + p_privilege => 'SELECT'); + end; + / + + begin + rdsadmin.rdsadmin_util.grant_sys_object( + p_obj_name => 'V_$MYSTAT', + p_grantee => 'YBVOYAGER', + p_privilege => 'SELECT'); + end; + / + + begin + rdsadmin.rdsadmin_util.grant_sys_object( + p_obj_name => 'V_$STATNAME', + p_grantee => 'YBVOYAGER', + p_privilege => 'SELECT'); + end; + / + ``` + +1. Enable supplemental logging in the database as follows: + + ```sql + exec rdsadmin.rdsadmin_util.alter_supplemental_logging('ADD'); + + begin + rdsadmin.rdsadmin_util.alter_supplemental_logging( + p_action => 'ADD', + p_type => 'ALL'); + end; + / + ``` + +1. 
Create `ybvoyager_metadata` schema or user, and tables for voyager to use during migration as follows: + + ```sql + CREATE USER ybvoyager_metadata IDENTIFIED BY "password"; + GRANT CONNECT, RESOURCE TO ybvoyager_metadata; + ALTER USER ybvoyager_metadata QUOTA UNLIMITED ON USERS; + + CREATE TABLE ybvoyager_metadata.ybvoyager_import_data_event_channels_metainfo ( + migration_uuid VARCHAR2(36), + channel_no INT, + last_applied_vsn NUMBER(19), + num_inserts NUMBER(19), + num_updates NUMBER(19), + num_deletes NUMBER(19), + PRIMARY KEY (migration_uuid, channel_no) + ); + + CREATE TABLE ybvoyager_metadata.ybvoyager_imported_event_count_by_table ( + migration_uuid VARCHAR2(36), + table_name VARCHAR2(250), + channel_no INT, + total_events NUMBER(19), + num_inserts NUMBER(19), + num_updates NUMBER(19), + num_deletes NUMBER(19), + PRIMARY KEY (migration_uuid, table_name, channel_no) + ); + ``` 1. Create a writer role for the source schema for Voyager to be able to write the changes from the target YugabyteDB database to the source database (in case of a fall-back): - ```sql - CREATE ROLE _writer_role; - - BEGIN - FOR R IN (SELECT owner, object_name FROM all_objects WHERE owner=UPPER('') and object_type ='TABLE' MINUS SELECT owner, table_name from all_nested_tables where owner = UPPER('')) - LOOP - EXECUTE IMMEDIATE 'GRANT SELECT, INSERT, UPDATE, DELETE, ALTER on '||R.owner||'."'||R.object_name||'" to _writer_role'; - END LOOP; - END; - / - - DECLARE - v_sql VARCHAR2(4000); - BEGIN - FOR table_rec IN (SELECT table_name FROM all_tables WHERE owner = 'YBVOYAGER_METADATA') LOOP - v_sql := 'GRANT ALL PRIVILEGES ON YBVOYAGER_METADATA.' 
|| table_rec.table_name || ' TO _writer_role'; - EXECUTE IMMEDIATE v_sql; - END LOOP; - END; - / + ```sql + CREATE ROLE _writer_role; - GRANT CREATE ANY SEQUENCE, SELECT ANY SEQUENCE, ALTER ANY SEQUENCE TO _writer_role; - ``` + BEGIN + FOR R IN (SELECT owner, object_name FROM all_objects WHERE owner=UPPER('') and object_type ='TABLE' MINUS SELECT owner, table_name from all_nested_tables where owner = UPPER('')) + LOOP + EXECUTE IMMEDIATE 'GRANT SELECT, INSERT, UPDATE, DELETE, ALTER on '||R.owner||'."'||R.object_name||'" to _writer_role'; + END LOOP; + END; + / + + DECLARE + v_sql VARCHAR2(4000); + BEGIN + FOR table_rec IN (SELECT table_name FROM all_tables WHERE owner = 'YBVOYAGER_METADATA') LOOP + v_sql := 'GRANT ALL PRIVILEGES ON YBVOYAGER_METADATA.' || table_rec.table_name || ' TO _writer_role'; + EXECUTE IMMEDIATE v_sql; + END LOOP; + END; + / + + GRANT CREATE ANY SEQUENCE, SELECT ANY SEQUENCE, ALTER ANY SEQUENCE TO _writer_role; + ``` 1. Assign the writer role to the source database user as follows: - ```sql - GRANT _writer_role TO ybvoyager; - ``` + ```sql + GRANT _writer_role TO ybvoyager; + ``` {{% /tab %}} {{< /tabpane >}} +If you want yb-voyager to connect to the source database over SSL, refer to [SSL Connectivity](../../reference/yb-voyager-cli/#ssl-connectivity). + +{{< note title="Connecting to Oracle instances" >}} +You can use only one of the following arguments to connect to your Oracle instance: + +- --source-db-schema (Schema name of the source database.) +- --oracle-db-sid (Oracle System Identifier you can use while exporting data from Oracle instances.) +- --oracle-tns-alias (TNS (Transparent Network Substrate) alias configured to establish a secure connection with the server.) +{{< /note >}} +
@@ -724,17 +734,9 @@ Create a new database user, and assign the necessary user permissions. {{< /tabpane >}} -
- If you want yb-voyager to connect to the source database over SSL, refer to [SSL Connectivity](../../reference/yb-voyager-cli/#ssl-connectivity). -{{< note title="Connecting to Oracle instances" >}} -You can use only one of the following arguments to connect to your Oracle instance: - -- --source-db-schema (Schema name of the source database.) -- --oracle-db-sid (Oracle System Identifier you can use while exporting data from Oracle instances.) -- --oracle-tns-alias (TNS (Transparent Network Substrate) alias configured to establish a secure connection with the server.) -{{< /note >}} + ## Prepare the target database @@ -1223,9 +1225,9 @@ Perform the following steps as part of the cutover process: 1. Re-enable triggers and foreign-key constraints on the source database using the following PL/SQL commands on the source schema as a privileged user: -{{< tabpane text=true >}} + {{< tabpane text=true >}} - {{% tab header="Oracle" %}} + {{% tab header="Oracle" %}} ```sql --enable triggers @@ -1250,9 +1252,9 @@ Perform the following steps as part of the cutover process: / ``` - {{% /tab %}} + {{% /tab %}} - {{% tab header="PostgreSQL" %}} + {{% tab header="PostgreSQL" %}} Use the following PL/SQL to enable the triggers and create foreign key constraints back before using the source again. @@ -1277,9 +1279,9 @@ Perform the following steps as part of the cutover process: --you can use schema dump from source which is use to import schema on target YugabyteDB (with the modifications if made in schema migration phase), one copy of the pure form of that dump is stored in `$EXPORT_DIR/temp/schema.sql`. ``` - {{% /tab %}} + {{% /tab %}} -{{< /tabpane >}} + {{< /tabpane >}} 1. Verify your migration. After the schema and data import is complete, the automated part of the database migration process is considered complete. You should manually run validation queries on both the source and target databases to ensure that the data is correctly migrated. 
A sample query to validate the databases can include checking the row count of each table. diff --git a/docs/content/preview/yugabyte-voyager/migrate/live-fall-forward.md b/docs/content/preview/yugabyte-voyager/migrate/live-fall-forward.md index 93bed1c751e6..c70b672ea2b6 100644 --- a/docs/content/preview/yugabyte-voyager/migrate/live-fall-forward.md +++ b/docs/content/preview/yugabyte-voyager/migrate/live-fall-forward.md @@ -359,6 +359,16 @@ Create a new database user, and assign the necessary user permissions. {{< /tabpane >}} +If you want yb-voyager to connect to the source database over SSL, refer to [SSL Connectivity](../../reference/yb-voyager-cli/#ssl-connectivity). + +{{< note title="Connecting to Oracle instances" >}} +You can use only one of the following arguments to connect to your Oracle instance. + +- --source-db-schema (Schema name of the source database.) +- --oracle-db-sid (Oracle System Identifier you can use while exporting data from Oracle instances.) +- --oracle-tns-alias (TNS (Transparent Network Substrate) alias configured to establish a secure connection with the server.) +{{< /note >}} +
@@ -587,17 +597,9 @@ Create a new database user, and assign the necessary user permissions. {{< /tabpane >}} -
- If you want yb-voyager to connect to the source database over SSL, refer to [SSL Connectivity](../../reference/yb-voyager-cli/#ssl-connectivity). -{{< note title="Connecting to Oracle instances" >}} -You can use only one of the following arguments to connect to your Oracle instance. - -- --source-db-schema (Schema name of the source database.) -- --oracle-db-sid (Oracle System Identifier you can use while exporting data from Oracle instances.) -- --oracle-tns-alias (TNS (Transparent Network Substrate) alias configured to establish a secure connection with the server.) -{{< /note >}} + ## Prepare the target database diff --git a/docs/content/preview/yugabyte-voyager/migrate/live-migrate.md b/docs/content/preview/yugabyte-voyager/migrate/live-migrate.md index 9201e0b9af70..ed590af05dd3 100644 --- a/docs/content/preview/yugabyte-voyager/migrate/live-migrate.md +++ b/docs/content/preview/yugabyte-voyager/migrate/live-migrate.md @@ -345,6 +345,16 @@ Create a new database user, and assign the necessary user permissions. {{< /tabpane >}} +If you want yb-voyager to connect to the source database over SSL, refer to [SSL Connectivity](../../reference/yb-voyager-cli/#ssl-connectivity). + +{{< note title="Connecting to Oracle instances" >}} +You can use only one of the following arguments to connect to your Oracle instance. + +- --source-db-schema (Schema name of the source database.) +- --oracle-db-sid (Oracle System Identifier you can use while exporting data from Oracle instances.) +- --oracle-tns-alias (TNS (Transparent Network Substrate) alias configured to establish a secure connection with the server.) +{{< /note >}} +
@@ -573,17 +583,9 @@ Create a new database user, and assign the necessary user permissions. {{< /tabpane >}} -
- If you want yb-voyager to connect to the source database over SSL, refer to [SSL Connectivity](../../reference/yb-voyager-cli/#ssl-connectivity). -{{< note title="Connecting to Oracle instances" >}} -You can use only one of the following arguments to connect to your Oracle instance. - -- --source-db-schema (Schema name of the source database.) -- --oracle-db-sid (Oracle System Identifier you can use while exporting data from Oracle instances.) -- --oracle-tns-alias (TNS (Transparent Network Substrate) alias configured to establish a secure connection with the server.) -{{< /note >}} + ## Prepare the target database From def0facf9368f57769d781d0bee395ae2c346eb8 Mon Sep 17 00:00:00 2001 From: Zachary Drudi Date: Wed, 11 Sep 2024 11:40:29 -0400 Subject: [PATCH 59/75] [#23879] docdb: Improve rpc metrics test. Summary: The unit test for rpc metrics calls an rpc that has identical request and response objects. Because this test is interested in metrics that measure the size of the request and the response it would be much better to call an RPC whose request and response objects are of different sizes. This test also typos and doesn't check one of the client side metrics. 
Jira: DB-12784 Test Plan: ``` ./yb_build.sh --cxx-test rpc_stub-test --gtest_filter RpcStubTest.TrafficMetrics ``` Reviewers: asrivastava Reviewed By: asrivastava Subscribers: ybase, slingam Differential Revision: https://phorge.dev.yugabyte.com/D37982 --- src/yb/rpc/rpc_stub-test.cc | 58 +++++++++++++++++++++---------------- 1 file changed, 33 insertions(+), 25 deletions(-) diff --git a/src/yb/rpc/rpc_stub-test.cc b/src/yb/rpc/rpc_stub-test.cc index 1be1beb1f371..f93b1ea82174 100644 --- a/src/yb/rpc/rpc_stub-test.cc +++ b/src/yb/rpc/rpc_stub-test.cc @@ -838,30 +838,38 @@ TEST_F(RpcStubTest, ExpireInQueue) { } TEST_F(RpcStubTest, TrafficMetrics) { - constexpr size_t kStringLen = 1_KB; - constexpr size_t kUpperBytesLimit = kStringLen + 64; + constexpr std::pair kRequestBounds = std::make_pair(1, 64); + constexpr std::pair kResponseBounds = std::make_pair(1_KB, 1_KB + 64); CalculatorServiceProxy proxy(proxy_cache_.get(), server_hostport_); RpcController controller; - rpc_test::EchoRequestPB req; - req.set_data(RandomHumanReadableString(kStringLen)); - rpc_test::EchoResponsePB resp; - ASSERT_OK(proxy.Echo(req, &resp, &controller)); - + rpc_test::RepeatedEchoRequestPB req; + req.set_character('a'); + req.set_count(kResponseBounds.first); + rpc_test::RepeatedEchoResponsePB resp; + ASSERT_OK(proxy.RepeatedEcho(req, &resp, &controller)); + + auto find_metric_by_name = [](const MetricEntity::MetricMap& map, + const std::string& name) -> Result { + auto it = std::find_if( + map.begin(), map.end(), [&name](const auto& entry) { return entry.first->name() == name; }); + if (it == map.end()) { + return STATUS_FORMAT(NotFound, "Couldn't find metric $0", name); + } + return down_cast(it->second.get()); + }; auto server_metrics = server_messenger()->metric_entity()->UnsafeMetricsMapForTests(); - - auto* service_request_bytes = down_cast(FindOrDie( - server_metrics, &METRIC_service_request_bytes_yb_rpc_test_CalculatorService_Echo).get()); - auto* service_response_bytes = 
down_cast(FindOrDie( - server_metrics, &METRIC_service_response_bytes_yb_rpc_test_CalculatorService_Echo).get()); + auto* service_request_bytes = ASSERT_RESULT(find_metric_by_name( + server_metrics, "service_request_bytes_yb_rpc_test_CalculatorService_RepeatedEcho")); + auto* service_response_bytes = ASSERT_RESULT(find_metric_by_name( + server_metrics, "service_response_bytes_yb_rpc_test_CalculatorService_RepeatedEcho")); auto client_metrics = client_messenger_->metric_entity()->UnsafeMetricsMapForTests(); - - auto* proxy_request_bytes = down_cast(FindOrDie( - client_metrics, &METRIC_proxy_request_bytes_yb_rpc_test_CalculatorService_Echo).get()); - auto* proxy_response_bytes = down_cast(FindOrDie( - client_metrics, &METRIC_proxy_response_bytes_yb_rpc_test_CalculatorService_Echo).get()); + auto* proxy_request_bytes = ASSERT_RESULT(find_metric_by_name( + client_metrics, "proxy_request_bytes_yb_rpc_test_CalculatorService_RepeatedEcho")); + auto* proxy_response_bytes = ASSERT_RESULT(find_metric_by_name( + client_metrics, "proxy_response_bytes_yb_rpc_test_CalculatorService_RepeatedEcho")); LOG(INFO) << "Inbound request bytes: " << service_request_bytes->value() << ", response bytes: " << service_response_bytes->value(); @@ -871,14 +879,14 @@ TEST_F(RpcStubTest, TrafficMetrics) { // We don't expect that sent and received bytes on client and server matches, because some // auxilary fields are not calculated. // For instance request size is taken into account on client, but not server. 
- ASSERT_GE(service_request_bytes->value(), kStringLen); - ASSERT_LT(service_request_bytes->value(), kUpperBytesLimit); - ASSERT_GE(service_response_bytes->value(), kStringLen); - ASSERT_LT(service_response_bytes->value(), kUpperBytesLimit); - ASSERT_GE(proxy_request_bytes->value(), kStringLen); - ASSERT_LT(proxy_request_bytes->value(), kUpperBytesLimit); - ASSERT_GE(proxy_request_bytes->value(), kStringLen); - ASSERT_LT(proxy_request_bytes->value(), kUpperBytesLimit); + EXPECT_GE(service_request_bytes->value(), kRequestBounds.first); + EXPECT_LT(service_request_bytes->value(), kRequestBounds.second); + EXPECT_GE(service_response_bytes->value(), kResponseBounds.first); + EXPECT_LT(service_response_bytes->value(), kResponseBounds.second); + EXPECT_GE(proxy_request_bytes->value(), kRequestBounds.first); + EXPECT_LT(proxy_request_bytes->value(), kRequestBounds.second); + EXPECT_GE(proxy_response_bytes->value(), kResponseBounds.first); + EXPECT_LT(proxy_response_bytes->value(), kResponseBounds.second); } template From 87a936a64b6c25697d9b7f7fd6fd1ece235d1dc2 Mon Sep 17 00:00:00 2001 From: Muthu Chidambaram Date: Tue, 17 Sep 2024 12:39:52 +0000 Subject: [PATCH 60/75] [PLAT-15353] Consistency checks testing hooks Summary: This diff adds some testing hooks for consistency check testing. test_pending will shutdown YBA immediately after the update is applied to the DB, to test handling of pending tasks on YBA restart ysql_timeout_secs is a flag that allows configuration of ysql timeout for consistency check commands (default is 30s) update_delay_secs allows you to introduce a sleep after the DB commit occurs, simulating a slow/unreliable network connection. During this time YBA can be interrupted or ysql timeout may occur. Also adds some minor improvements around the consistency_check table (renamed to yba_consistency_check) and added yw_uuid and yw_ip columns that may be useful going forward. Only the yw_uuid is populated for now. 
Test Plan: consistency check with flags enabled, ensure failures are triggered regular task execution not impacted Reviewers: sanketh, dshubin, nsingh, anijhawan Reviewed By: sanketh, nsingh Subscribers: yugaware Differential Revision: https://phorge.dev.yugabyte.com/D37950 --- managed/RUNTIME-FLAGS.md | 3 +- .../commissioner/tasks/UniverseTaskBase.java | 38 +- .../subtasks/UpdateConsistencyCheck.java | 349 ++++++++++++------ .../com/yugabyte/yw/common/ConfigHelper.java | 8 + .../yw/common/CustomerTaskManager.java | 2 +- .../java/com/yugabyte/yw/common/Util.java | 2 +- .../yugabyte/yw/common/YsqlQueryExecutor.java | 24 +- .../yw/common/config/UniverseConfKeys.java | 28 +- .../yw/models/helpers/CommonUtils.java | 10 +- managed/src/main/resources/reference.conf | 7 +- .../yugabyte/yw/commissioner/MockUpgrade.java | 2 +- .../tasks/AddNodeToUniverseTest.java | 6 +- .../tasks/DeleteNodeFromUniverseTest.java | 4 +- .../tasks/DeleteXClusterConfigTest.java | 4 +- .../tasks/DestroyKubernetesUniverseTest.java | 2 - .../tasks/EditKubernetesUniverseTest.java | 6 - .../commissioner/tasks/EditUniverseTest.java | 4 +- .../tasks/EditXClusterConfigTest.java | 8 +- .../commissioner/tasks/PauseUniverseTest.java | 2 - .../tasks/ReadOnlyClusterCreateTest.java | 2 +- .../tasks/ReadOnlyClusterDeleteTest.java | 1 - .../tasks/RebootNodeInUniverseTest.java | 6 +- .../ReleaseInstanceFromUniverseTest.java | 2 +- .../tasks/RemoveNodeFromUniverseTest.java | 6 +- .../tasks/ResumeUniverseTest.java | 4 - .../tasks/StartMasterOnNodeTest.java | 2 +- .../tasks/StartNodeInUniverseTest.java | 4 +- .../tasks/StopNodeInUniverseTest.java | 10 +- .../tasks/UpgradeKubernetesUniverseTest.java | 4 +- .../local/LocalProviderUniverseTestBase.java | 8 +- .../local/UpdateConsistencyLocalTest.java | 6 +- .../tasks/upgrade/CertsRotateTest.java | 6 +- .../tasks/upgrade/FinalizeUpgradeTest.java | 4 +- .../upgrade/GFlagsKubernetesUpgradeTest.java | 2 +- .../RollbackKubernetesUpgradeTest.java | 2 +- 
.../tasks/upgrade/RollbackUpgradeTest.java | 6 +- .../SoftwareKubernetesUpgradeTest.java | 2 +- .../SoftwareKubernetesUpgradeYBTest.java | 2 +- .../tasks/upgrade/TlsToggleTest.java | 4 +- .../tasks/upgrade/VMImageUpgradeTest.java | 4 +- 40 files changed, 398 insertions(+), 198 deletions(-) diff --git a/managed/RUNTIME-FLAGS.md b/managed/RUNTIME-FLAGS.md index fa8a092dc58c..baf8f556a1e9 100644 --- a/managed/RUNTIME-FLAGS.md +++ b/managed/RUNTIME-FLAGS.md @@ -227,6 +227,7 @@ | "Use server broadcast address for yb_backup" | "yb.backup.use_server_broadcast_address_for_yb_backup" | "UNIVERSE" | "Controls whether server_broadcast_address entry should be used during yb_backup.py backup/restore" | "Boolean" | | "Slow Queries Timeout" | "yb.query_stats.slow_queries.timeout_secs" | "UNIVERSE" | "Timeout in secs for slow queries" | "Long" | | "YSQL Queries Timeout" | "yb.ysql_timeout_secs" | "UNIVERSE" | "Timeout in secs for YSQL queries" | "Long" | +| "YSQL Queries Timeout for Consistency Check Operations" | "yb.universe.consistency_check.ysql_timeout_secs" | "UNIVERSE" | "Timeout in secs for YSQL queries" | "Long" | | "Number of cores to keep" | "yb.num_cores_to_keep" | "UNIVERSE" | "Controls the configuration to set the number of cores to keep in the Ansible layer" | "Integer" | | "Whether to check YBA xCluster object is in sync with DB replication group" | "yb.xcluster.ensure_sync_get_replication_status" | "UNIVERSE" | "It ensures that the YBA XCluster object for tables that are in replication is in sync with replication group in DB. If they are not in sync and this is true, getting the xCluster object will throw an exception and the user has to resync the xCluster config." | "Boolean" | | "Network Load balancer health check ports" | "yb.universe.network_load_balancer.custom_health_check_ports" | "UNIVERSE" | "Ports to use for health checks performed by the network load balancer. Invalid and duplicate ports will be ignored. 
For GCP, only the first health check port would be used." | "Integer List" | @@ -256,6 +257,6 @@ | "Enable health checks for time drift between nodes" | "yb.health_checks.check_clock_time_drift" | "UNIVERSE" | "Enable health checks for time drift between nodes." | "Boolean" | | "Time drift threshold for warning health check" | "yb.health_checks.time_drift_wrn_threshold_ms" | "UNIVERSE" | "Threshold to raise a warning when time drift exceeds this amount" | "Integer" | | "Time drift threshold for error health check" | "yb.health_checks.time_drift_err_threshold_ms" | "UNIVERSE" | "Threshold to raise a error when time drift exceeds this amount" | "Integer" | -| "Enable consistency check for universe" | "yb.universe.consistency_check_enabled" | "UNIVERSE" | "When enabled, all universe operations will attempt consistency check validation before proceeding. Turn off in disaster scenarios to force perform actions." | "Boolean" | +| "Enable consistency check for universe" | "yb.universe.consistency_check.enabled" | "UNIVERSE" | "When enabled, all universe operations will attempt consistency check validation before proceeding. Turn off in disaster scenarios to force perform actions." 
| "Boolean" | | "Fail the the health check if no clock sync service is found" | "yb.health_checks.clock_sync_service_required" | "UNIVERSE" | "Require chrony or ntp(d) to be installed for health check to pass" | "Boolean" | | "Node Agent Enabler Installation Time-out" | "yb.node_agent.enabler.install_timeout" | "UNIVERSE" | "Node agent enabler installation time-out for the universe" | "Duration" | diff --git a/managed/src/main/java/com/yugabyte/yw/commissioner/tasks/UniverseTaskBase.java b/managed/src/main/java/com/yugabyte/yw/commissioner/tasks/UniverseTaskBase.java index 22aad89ff450..5ebb12900287 100644 --- a/managed/src/main/java/com/yugabyte/yw/commissioner/tasks/UniverseTaskBase.java +++ b/managed/src/main/java/com/yugabyte/yw/commissioner/tasks/UniverseTaskBase.java @@ -389,6 +389,27 @@ public CustomBuilder taskTypes(Collection taskTypes) { TaskType.DeleteBackupScheduleKubernetes, TaskType.EnableNodeAgentInUniverse); + private static final Set SKIP_CONSISTENCY_CHECK_TASKS = + ImmutableSet.of( + TaskType.CreateBackup, + TaskType.CreateBackupSchedule, + TaskType.CreateBackupScheduleKubernetes, + TaskType.CreateKubernetesUniverse, + TaskType.CreateSupportBundle, + TaskType.CreateUniverse, + TaskType.BackupUniverse, + TaskType.DeleteBackupSchedule, + TaskType.DeleteBackupScheduleKubernetes, + TaskType.DeleteDrConfig, + TaskType.DeletePitrConfig, + TaskType.DeleteXClusterConfig, + TaskType.DestroyUniverse, + TaskType.DestroyKubernetesUniverse, + TaskType.EditBackupSchedule, + TaskType.EditBackupScheduleKubernetes, + TaskType.MultiTableBackup, + TaskType.ReadOnlyClusterDelete); + private static final Set RERUNNABLE_PLACEMENT_MODIFICATION_TASKS = ImmutableSet.of( TaskType.GFlagsUpgrade, @@ -1172,6 +1193,13 @@ public Universe lockAndFreezeUniverseForUpdate( Universe universe = lockUniverseForUpdate(universeUuid, updater); try { createPrecheckTasks(universe); + TaskType taskType = getTaskExecutor().getTaskType(getClass()); + if 
(!SKIP_CONSISTENCY_CHECK_TASKS.contains(taskType) + && confGetter.getConfForScope(universe, UniverseConfKeys.enableConsistencyCheck) + && universe.getUniverseDetails().getPrimaryCluster().userIntent.replicationFactor > 1) { + log.info("Creating consistency check task for task {}", taskType); + checkAndCreateConsistencyCheckTableTask(universe.getUniverseDetails().getPrimaryCluster()); + } if (isFirstTry()) { createFreezeUniverseTask(universeUuid, firstRunTxnCallback) .setSubTaskGroupType(SubTaskGroupType.ValidateConfigurations); @@ -1181,14 +1209,6 @@ public Universe lockAndFreezeUniverseForUpdate( createFreezeUniverseTask(universeUuid) .setSubTaskGroupType(SubTaskGroupType.ValidateConfigurations); } - if (confGetter.getConfForScope(universe, UniverseConfKeys.enableConsistencyCheck)) { - TaskType taskType = getTaskExecutor().getTaskType(getClass()); - if (taskType != TaskType.CreateUniverse && taskType != TaskType.CreateKubernetesUniverse) { - log.info("Creating consistency check task for task {}", taskType); - checkAndCreateConsistencyCheckTableTask( - universe.getUniverseDetails().getPrimaryCluster()); - } - } return Universe.getOrBadRequest(universeUuid); } catch (RuntimeException e) { unlockUniverseForUpdate(universeUuid); @@ -1462,7 +1482,7 @@ public void createDropSystemPlatformDBTablesTask( universe, CommonTypes.TableType.PGSQL_TABLE_TYPE, Util.SYSTEM_PLATFORM_DB, - Util.CONSISTENCY_CHECK) + Util.CONSISTENCY_CHECK_TABLE_NAME) .setSubTaskGroupType(subTaskGroupType); } diff --git a/managed/src/main/java/com/yugabyte/yw/commissioner/tasks/subtasks/UpdateConsistencyCheck.java b/managed/src/main/java/com/yugabyte/yw/commissioner/tasks/subtasks/UpdateConsistencyCheck.java index 9395ae4efea4..6f4e0ba23b64 100644 --- a/managed/src/main/java/com/yugabyte/yw/commissioner/tasks/subtasks/UpdateConsistencyCheck.java +++ b/managed/src/main/java/com/yugabyte/yw/commissioner/tasks/subtasks/UpdateConsistencyCheck.java @@ -10,7 +10,7 @@ package 
com.yugabyte.yw.commissioner.tasks.subtasks; -import static com.yugabyte.yw.common.Util.CONSISTENCY_CHECK; +import static com.yugabyte.yw.common.Util.CONSISTENCY_CHECK_TABLE_NAME; import static com.yugabyte.yw.common.Util.SYSTEM_PLATFORM_DB; import static play.mvc.Http.Status.BAD_REQUEST; import static play.mvc.Http.Status.INTERNAL_SERVER_ERROR; @@ -23,23 +23,22 @@ import com.yugabyte.yw.common.PlatformServiceException; import com.yugabyte.yw.common.RecoverableException; import com.yugabyte.yw.common.ShellResponse; +import com.yugabyte.yw.common.Util; import com.yugabyte.yw.common.YsqlQueryExecutor.ConsistencyInfoResp; +import com.yugabyte.yw.common.config.UniverseConfKeys; import com.yugabyte.yw.forms.RunQueryFormData; import com.yugabyte.yw.forms.UniverseDefinitionTaskParams; import com.yugabyte.yw.forms.UniverseTaskParams; import com.yugabyte.yw.models.PendingConsistencyCheck; import com.yugabyte.yw.models.Universe; -import com.yugabyte.yw.models.helpers.ColumnDetails; -import com.yugabyte.yw.models.helpers.ColumnDetails.YQLDataType; import com.yugabyte.yw.models.helpers.CommonUtils; import com.yugabyte.yw.models.helpers.NodeDetails; -import com.yugabyte.yw.models.helpers.TableDetails; +import com.yugabyte.yw.models.helpers.TaskType; import java.time.Duration; -import java.util.ArrayList; +import java.util.UUID; import javax.inject.Inject; import lombok.extern.slf4j.Slf4j; -import org.yb.ColumnSchema.SortOrder; -import org.yb.CommonTypes.TableType; +import org.apache.commons.lang3.StringUtils; @Slf4j public class UpdateConsistencyCheck extends UniverseTaskBase { @@ -57,156 +56,292 @@ protected Params taskParams() { return (Params) taskParams; } - public void createConsistencyCheckTable() { - - CreateTable task = createTask(CreateTable.class); - ColumnDetails seqNumColumn = new ColumnDetails(); - seqNumColumn.isClusteringKey = true; - seqNumColumn.name = "seq_num"; - seqNumColumn.type = YQLDataType.INT; - seqNumColumn.sortOrder = SortOrder.ASC; - - ColumnDetails 
opUUIDColumn = new ColumnDetails(); - opUUIDColumn.name = "task_uuid"; - opUUIDColumn.type = YQLDataType.UUID; - - TableDetails details = new TableDetails(); - details.tableName = CONSISTENCY_CHECK; - details.keyspace = SYSTEM_PLATFORM_DB; - details.columns = new ArrayList<>(); - details.columns.add(seqNumColumn); - details.columns.add(opUUIDColumn); - - CreateTable.Params params = new CreateTable.Params(); - params.setUniverseUUID(taskParams().getUniverseUUID()); - params.tableType = TableType.PGSQL_TABLE_TYPE; - params.tableName = details.tableName; - params.tableDetails = details; - params.ifNotExist = true; + public void createConsistencyCheckTable(Universe universe, NodeDetails node) { + String createQuery = + String.format( + "CREATE TABLE IF NOT EXISTS %s (seq_num INT, task_uuid UUID, yw_uuid UUID, yw_host" + + " VARCHAR, PRIMARY KEY (task_uuid HASH, seq_num DESC)) SPLIT INTO 1 TABLETS;", + CONSISTENCY_CHECK_TABLE_NAME); + RunQueryFormData runQueryFormData = new RunQueryFormData(); + runQueryFormData.setDbName(SYSTEM_PLATFORM_DB); + runQueryFormData.setQuery(createQuery); + JsonNode ysqlResponse = + ysqlQueryExecutor.executeQueryInNodeShell( + universe, + runQueryFormData, + node, + confGetter.getConfForScope(universe, UniverseConfKeys.ysqlConsistencyTimeoutSecs)); + int retries = 0; + // Retry loop + while (ysqlResponse != null && ysqlResponse.has("error") && retries < 5) { + retries += 1; + ysqlResponse = + ysqlQueryExecutor.executeQueryInNodeShell( + universe, + runQueryFormData, + CommonUtils.getARandomLiveOrToBeRemovedTServer(universe), + confGetter.getConfForScope(universe, UniverseConfKeys.ysqlConsistencyTimeoutSecs)); + } + if (ysqlResponse != null && ysqlResponse.has("error")) { + TaskType taskType = getTaskExecutor().getTaskType(getClass()); + if (taskType == TaskType.CreateUniverse || taskType == TaskType.CreateKubernetesUniverse) { + log.error( + "Could not create initial consistency check table for new universe {}.", + universe.getName()); + 
throw new PlatformServiceException( + INTERNAL_SERVER_ERROR, ysqlResponse.get("error").asText()); + } else { + log.warn( + "Could not create consistency check table for existing universe {}, skipping. Is the" + + " universe healthy?", + universe.getName()); + return; + } + } + runQueryFormData.setQuery( + String.format( + "INSERT INTO %s (seq_num, task_uuid, yw_uuid, yw_host) VALUES (0, '%s', '%s'," + + " '%s')", + CONSISTENCY_CHECK_TABLE_NAME, + getTaskUUID(), + configHelper.getYugawareUUID(), + getYwHostname())); + ysqlResponse = + ysqlQueryExecutor.executeQueryInNodeShell( + universe, + runQueryFormData, + node, + confGetter.getConfForScope(universe, UniverseConfKeys.ysqlConsistencyTimeoutSecs)); + retries = 0; + // retry loop + while (ysqlResponse != null && ysqlResponse.has("error") && retries < 5) { + retries += 1; + node = CommonUtils.getARandomLiveOrToBeRemovedTServer(universe); + ysqlResponse = + ysqlQueryExecutor.executeQueryInNodeShell( + universe, + runQueryFormData, + node, + confGetter.getConfForScope(universe, UniverseConfKeys.ysqlConsistencyTimeoutSecs)); + } + if (ysqlResponse != null && ysqlResponse.has("error")) { + log.warn("Could not perform inital insert into consistency check table."); + throw new PlatformServiceException(INTERNAL_SERVER_ERROR, ysqlResponse.get("error").asText()); + } + // Update local YBA sequence number with initial value + updateUniverseSeqNum(universe, 0); + } - task.initialize(params); - task.setUserTaskUUID(getUserTaskUUID()); - task.run(); + private String getYwHostname() { + String host = Util.getHostname(); + if (StringUtils.isNotBlank(host) && !host.equalsIgnoreCase("localhost")) { + return host; + } + String ip = Util.getHostIP(); + if (!ip.equalsIgnoreCase("127.0.0.1")) { + return ip; + } + return ""; } @Override public void run() { Universe universe = Universe.getOrBadRequest(taskParams().getUniverseUUID()); + String universeName = universe.getName(); + UUID universeUUID = universe.getUniverseUUID(); + int 
ybaSeqNum = universe.getUniverseDetails().sequenceNumber; NodeDetails node; try { node = CommonUtils.getServerToRunYsqlQuery(universe, true); } catch (IllegalStateException e) { - log.warn("Could not find valid tserver, skipping consistency check."); - return; - } - ConsistencyInfoResp response; - try { - response = ysqlQueryExecutor.getConsistencyInfo(universe); - } catch (RecoverableException e) { - log.info("Creating consistency check table for the first time."); - createConsistencyCheckTable(); - RunQueryFormData runQueryFormData = new RunQueryFormData(); - runQueryFormData.setQuery( - String.format( - "INSERT INTO %s (seq_num, task_uuid) VALUES (0, '%s')", - CONSISTENCY_CHECK, getTaskUUID().toString())); - runQueryFormData.setDbName(SYSTEM_PLATFORM_DB); - JsonNode ysqlResponse = - ysqlQueryExecutor.executeQueryInNodeShell(universe, runQueryFormData, node); - if (ysqlResponse != null && ysqlResponse.has("error")) { - throw new PlatformServiceException( - INTERNAL_SERVER_ERROR, ysqlResponse.get("error").asText()); - } - // Update local YBA sequence number with initial value - updateUniverseSeqNum(0); + log.warn( + "Could not find valid tserver, skipping consistency check for universe {} ({}).", + universeName, + universeUUID); return; } - - if (response != null) { - int ybaSeqNum = universe.getUniverseDetails().sequenceNumber; - int dbSeqNum = response.getSeqNum(); - // Accept whatever is in the DB - if (ybaSeqNum == -1) { - log.info("Accepting whatever sequence number found in YBDB."); - updateUniverseSeqNum(dbSeqNum); + // Special case for -1 + if (ybaSeqNum == -1) { + log.info("YBA sequence number is -1 for universe {} ({})", universeName, universeUUID); + ConsistencyInfoResp response = null; + try { + response = ysqlQueryExecutor.getConsistencyInfo(universe); + } catch (RecoverableException e) { + // Table does not exist + log.info( + "Creating consistency check table for the first time for universe {} ({}).", + universeName, + universeUUID); + 
createConsistencyCheckTable(universe, node); return; - // Stale - } else if (dbSeqNum > ybaSeqNum) { - throw new PlatformServiceException( - BAD_REQUEST, "Can not operate on universe with stale metadata."); + } + if (response != null) { + log.info( + "Accepting db sequence number {} for universe {} ({})", + response.getSeqNum(), + universeName, + universeUUID); + updateUniverseSeqNum(universe, response.getSeqNum()); } else { - log.info("Validated YBA sequence number"); + log.warn( + "Could not read consistency info for universe {} ({}) and local is -1, skipping" + + " consistency check", + universeName, + universeUUID); } - } else { - log.warn( - "Could not read consistency info, skipping comparison validation but proceeding with" - + " update."); + return; } - // Update values + // Normal update int ybaSeqNumUpdate = universe.getUniverseDetails().sequenceNumber + 1; PendingConsistencyCheck pend = PendingConsistencyCheck.create(getTaskUUID(), universe); - String taskUUIDString = getTaskUUID().toString(); RunQueryFormData runQueryFormData = new RunQueryFormData(); String updateQuery = String.format( - "UPDATE %s SET seq_num = %d, task_uuid = '%s' WHERE seq_num < %d OR" - + " (seq_num = %d AND task_uuid = '%s') RETURNING seq_num, task_uuid", - CONSISTENCY_CHECK, + "WITH updated_rows AS (UPDATE %s SET seq_num = %d, task_uuid = '%s', yw_uuid = '%s'," + + " yw_host = '%s' WHERE seq_num < %d OR (seq_num = %d AND task_uuid = '%s')" + + " RETURNING seq_num, task_uuid) SELECT jsonb_agg(updated_rows) AS result FROM" + + " updated_rows;", + CONSISTENCY_CHECK_TABLE_NAME, ybaSeqNumUpdate, - taskUUIDString, + getTaskUUID(), + configHelper.getYugawareUUID(), + getYwHostname(), ybaSeqNumUpdate, ybaSeqNumUpdate, - taskUUIDString); - runQueryFormData.setQuery( - String.format( - "WITH updated_rows AS (%s) SELECT jsonb_agg(updated_rows) AS result FROM" - + " updated_rows;", - updateQuery)); + getTaskUUID()); + // Testing string that includes pg_sleep + if 
(confGetter.getConfForScope(universe, UniverseConfKeys.consistencyUpdateDelay) > 0) { + updateQuery = + String.format( + "BEGIN; UPDATE %s SET seq_num = %d, task_uuid = '%s', yw_uuid = '%s', yw_host = '%s'" + + " WHERE seq_num < %d OR (seq_num = %d AND task_uuid = '%s'); COMMIT; SELECT" + + " pg_sleep(%d); SELECT jsonb_agg(x) FROM (SELECT seq_num, task_uuid FROM %s" + + " ORDER BY seq_num DESC LIMIT 1) as x;", + CONSISTENCY_CHECK_TABLE_NAME, + ybaSeqNumUpdate, + getTaskUUID(), + configHelper.getYugawareUUID(), + getYwHostname(), + ybaSeqNumUpdate, + ybaSeqNumUpdate, + getTaskUUID(), + confGetter.getConfForScope(universe, UniverseConfKeys.consistencyUpdateDelay), + CONSISTENCY_CHECK_TABLE_NAME); + } + runQueryFormData.setQuery(updateQuery); runQueryFormData.setDbName(SYSTEM_PLATFORM_DB); + log.info( + "Attempting to update DB to sequence number {} for universe {} ({})", + ybaSeqNumUpdate, + universeName, + universeUUID); try { + // Attempt to update DB JsonNode ysqlResponse = - ysqlQueryExecutor.executeQueryInNodeShell(universe, runQueryFormData, node); + ysqlQueryExecutor.executeQueryInNodeShell( + universe, + runQueryFormData, + node, + confGetter.getConfForScope(universe, UniverseConfKeys.ysqlConsistencyTimeoutSecs)); int retries = 0; + // Retry loop while (ysqlResponse != null && ysqlResponse.has("error") && retries < 5) { - waitFor(Duration.ofMillis(2500)); + node = CommonUtils.getARandomLiveOrToBeRemovedTServer(universe); retries += 1; - ysqlResponse = ysqlQueryExecutor.executeQueryInNodeShell(universe, runQueryFormData, node); + ysqlResponse = + ysqlQueryExecutor.executeQueryInNodeShell( + universe, + runQueryFormData, + node, + confGetter.getConfForScope(universe, UniverseConfKeys.ysqlConsistencyTimeoutSecs)); + } + // Testing hook for CustomerTaskManager.handlePendingConsistencyTasks + if (confGetter.getConfForScope(universe, UniverseConfKeys.consistencyCheckPendingTest)) { + Util.shutdownYbaProcess(0); + waitFor(Duration.ofMillis(10000)); + } + // Error 
case with unexpected failure running remote query, unable to validate so reset to + // be safe + if (ysqlResponse != null && ysqlResponse.has("error")) { + log.warn( + "Consistency check is not active for universe {} due to error: {}. Resetting local" + + " sequence number.", + universeName, + ysqlResponse.get("error").asText()); + updateUniverseSeqNum(universe, -1); + return; } if (ysqlResponse != null && ysqlResponse.has("result")) { ShellResponse shellResponse = ShellResponse.create(0, ysqlResponse.get("result").asText()); ObjectMapper objectMapper = new ObjectMapper(); JsonNode jsonNode = objectMapper.readTree(CommonUtils.extractJsonisedSqlResponse(shellResponse)); - if (jsonNode != null && jsonNode.get(0) != null && jsonNode.get(0).has("seq_num")) { - // Update local YBA sequence number with update value - updateUniverseSeqNum(jsonNode.get(0).get("seq_num").asInt()); - } else { - // no rows updated, must be stale + // No rows updated, stale metadata + if (jsonNode == null + || jsonNode.get(0) == null + || !jsonNode.get(0).has("seq_num") + || !jsonNode.get(0).has("task_uuid")) { + // Best effort try to read what's in the DB for better error message. + ConsistencyInfoResp response = ysqlQueryExecutor.getConsistencyInfo(universe); + if (response != null) { + throw new PlatformServiceException( + BAD_REQUEST, + String.format( + "No rows updated performing consistency check, stale universe metadata. DB at" + + " version %d set by YBA %s (%s) during task %s. Task should be run from" + + " up to date YBA or contact Yugabyte Support to resolve.", + response.getSeqNum(), + response.getYwHost(), + response.getYwUUID(), + response.getTaskUUID())); + } throw new PlatformServiceException( BAD_REQUEST, - "No rows updated performing consistency check, potentially stale universe" - + " metadata."); + "No rows updated performing consistency check, stale universe metadata. 
Task should" + + " be run from up to date YBA or contact Yugabyte Support to resolve."); } - } else if (ysqlResponse != null && ysqlResponse.has("error")) { - log.warn( - "Consistency check is not active due to error: {}.", - ysqlResponse.get("error").asText()); + // Valid result, perform update + int dbSeqNum = jsonNode.get(0).get("seq_num").asInt(); + UUID dbTaskUuid = UUID.fromString(jsonNode.get(0).get("task_uuid").asText()); + + // Doubtful this should ever execute, but in case, treat as stale. + if (dbSeqNum != ybaSeqNumUpdate || !dbTaskUuid.equals(getTaskUUID())) { + throw new PlatformServiceException( + BAD_REQUEST, + String.format( + "Found different values (seq_num: %d, task_uuid: %s) in DB after intended update." + + " Expected seq_num: %d and task_uuid: %s.", + dbSeqNum, dbTaskUuid, ybaSeqNumUpdate, getTaskUUID())); + } + // Update local YBA sequence number with update value + log.info( + "Updated DB to seq_num: {} and task_uuid: {}, setting local sequence number for" + + " universe {}.", + dbSeqNum, + dbTaskUuid, + universeName); + updateUniverseSeqNum(universe, dbSeqNum); } } catch (JsonProcessingException e) { - log.warn("Error processing JSON response from update query: {}.", e.getMessage()); + log.warn( + "Error processing JSON response from update query: {}. 
Consistency check may not be" + + " active.", + e.getMessage()); + updateUniverseSeqNum(universe, -1); } finally { pend.delete(); } } - private void updateUniverseSeqNum(int seqNum) { + private void updateUniverseSeqNum(Universe universe, int seqNum) { Universe.UniverseUpdater updater = - universe -> { - UniverseDefinitionTaskParams universeDetails = universe.getUniverseDetails(); + u -> { + UniverseDefinitionTaskParams universeDetails = u.getUniverseDetails(); universeDetails.sequenceNumber = seqNum; - universe.setUniverseDetails(universeDetails); + u.setUniverseDetails(universeDetails); }; saveUniverseDetails(updater); + log.info("Updated {} universe details sequence number to {}.", universe.getName(), seqNum); } } diff --git a/managed/src/main/java/com/yugabyte/yw/common/ConfigHelper.java b/managed/src/main/java/com/yugabyte/yw/common/ConfigHelper.java index 2572ac787bb4..c14c8a7d5181 100644 --- a/managed/src/main/java/com/yugabyte/yw/common/ConfigHelper.java +++ b/managed/src/main/java/com/yugabyte/yw/common/ConfigHelper.java @@ -131,6 +131,14 @@ public void loadSoftwareVersiontoDB(Environment environment) { loadConfigToDB(ConfigType.YugawareMetadata, ywMetadata); } + public UUID getYugawareUUID() { + Object ywUUID = getConfig(ConfigHelper.ConfigType.YugawareMetadata).get("yugaware_uuid"); + if (ywUUID != null) { + return UUID.fromString(ywUUID.toString()); + } + return null; + } + public void loadConfigsToDB(Environment environment) { LoaderOptions loaderOptions = new LoaderOptions(); diff --git a/managed/src/main/java/com/yugabyte/yw/common/CustomerTaskManager.java b/managed/src/main/java/com/yugabyte/yw/common/CustomerTaskManager.java index b972a5bb51f3..30d890168272 100644 --- a/managed/src/main/java/com/yugabyte/yw/common/CustomerTaskManager.java +++ b/managed/src/main/java/com/yugabyte/yw/common/CustomerTaskManager.java @@ -739,7 +739,7 @@ public void handlePendingConsistencyTasks() { Universe universe = 
Universe.getOrBadRequest(pending.getUniverse().getUniverseUUID()); ConsistencyInfoResp response = ysqlQueryExecutor.getConsistencyInfo(universe); if (response != null) { - UUID dbTaskUuid = response.getTaskUuid(); + UUID dbTaskUuid = response.getTaskUUID(); int dbSeqNum = response.getSeqNum(); if (dbTaskUuid.equals(pending.getTaskUuid())) { // Updated on DB side before crash, set ourselves to whatever is in the DB diff --git a/managed/src/main/java/com/yugabyte/yw/common/Util.java b/managed/src/main/java/com/yugabyte/yw/common/Util.java index 3f646977fa79..a1b01366529d 100644 --- a/managed/src/main/java/com/yugabyte/yw/common/Util.java +++ b/managed/src/main/java/com/yugabyte/yw/common/Util.java @@ -108,7 +108,7 @@ public class Util { public static final String DEFAULT_YCQL_USERNAME = "cassandra"; public static final String DEFAULT_YCQL_PASSWORD = "cassandra"; public static final String YUGABYTE_DB = "yugabyte"; - public static final String CONSISTENCY_CHECK = "consistency_check"; + public static final String CONSISTENCY_CHECK_TABLE_NAME = "yba_consistency_check"; public static final int MIN_NUM_BACKUPS_TO_RETAIN = 3; public static final String REDACT = "REDACTED"; public static final String KEY_LOCATION_SUFFIX = "/backup_keys.json"; diff --git a/managed/src/main/java/com/yugabyte/yw/common/YsqlQueryExecutor.java b/managed/src/main/java/com/yugabyte/yw/common/YsqlQueryExecutor.java index cdce2f380ea7..93259759422b 100644 --- a/managed/src/main/java/com/yugabyte/yw/common/YsqlQueryExecutor.java +++ b/managed/src/main/java/com/yugabyte/yw/common/YsqlQueryExecutor.java @@ -2,7 +2,7 @@ package com.yugabyte.yw.common; -import static com.yugabyte.yw.common.Util.CONSISTENCY_CHECK; +import static com.yugabyte.yw.common.Util.CONSISTENCY_CHECK_TABLE_NAME; import static com.yugabyte.yw.common.Util.SYSTEM_PLATFORM_DB; import static play.libs.Json.newObject; import static play.libs.Json.toJson; @@ -509,13 +509,27 @@ public static class ConsistencyInfoResp { 
@JsonProperty("seq_num") private int seqNum; - public UUID getTaskUuid() { + @JsonProperty("yw_uuid") + private UUID ywUuid; + + @JsonProperty("yw_host") + private String ywHost; + + public UUID getTaskUUID() { return taskUuid; } public int getSeqNum() { return seqNum; } + + public UUID getYwUUID() { + return ywUuid; + } + + public String getYwHost() { + return ywHost; + } } public ConsistencyInfoResp getConsistencyInfo(Universe universe) throws RecoverableException { @@ -530,14 +544,16 @@ public ConsistencyInfoResp getConsistencyInfo(Universe universe) throws Recovera ysqlQuery.setDbName(SYSTEM_PLATFORM_DB); ysqlQuery.setQuery( String.format( - "SELECT seq_num, task_uuid FROM %s ORDER BY seq_num DESC LIMIT 1", CONSISTENCY_CHECK)); + "SELECT seq_num, task_uuid, yw_uuid, yw_host FROM %s ORDER BY seq_num DESC LIMIT 1", + CONSISTENCY_CHECK_TABLE_NAME)); JsonNode response = executeQueryInNodeShell(universe, ysqlQuery, node); int retries = 0; while (response != null && response.has("error") && retries < 5) { - String match = String.format("relation \"%s\" does not exist", CONSISTENCY_CHECK); + String match = String.format("relation \"%s\" does not exist", CONSISTENCY_CHECK_TABLE_NAME); if (response.get("error").asText().contains(match)) { throw new RecoverableException("consistency_check table does not exist"); } + node = CommonUtils.getARandomLiveOrToBeRemovedTServer(universe); retries += 1; response = executeQueryInNodeShell(universe, ysqlQuery, node); } diff --git a/managed/src/main/java/com/yugabyte/yw/common/config/UniverseConfKeys.java b/managed/src/main/java/com/yugabyte/yw/common/config/UniverseConfKeys.java index 04a32963f36c..99efc6b6540e 100644 --- a/managed/src/main/java/com/yugabyte/yw/common/config/UniverseConfKeys.java +++ b/managed/src/main/java/com/yugabyte/yw/common/config/UniverseConfKeys.java @@ -826,6 +826,14 @@ public class UniverseConfKeys extends RuntimeConfigKeysModule { "Timeout in secs for YSQL queries", ConfDataType.LongType, 
ImmutableList.of(ConfKeyTags.PUBLIC)); + public static final ConfKeyInfo ysqlConsistencyTimeoutSecs = + new ConfKeyInfo<>( + "yb.universe.consistency_check.ysql_timeout_secs", + ScopeType.UNIVERSE, + "YSQL Queries Timeout for Consistency Check Operations", + "Timeout in secs for YSQL queries", + ConfDataType.LongType, + ImmutableList.of(ConfKeyTags.PUBLIC)); public static final ConfKeyInfo numCoresToKeep = new ConfKeyInfo<>( "yb.num_cores_to_keep", @@ -1323,7 +1331,7 @@ public class UniverseConfKeys extends RuntimeConfigKeysModule { ImmutableList.of(ConfKeyTags.INTERNAL)); public static final ConfKeyInfo enableConsistencyCheck = new ConfKeyInfo<>( - "yb.universe.consistency_check_enabled", + "yb.universe.consistency_check.enabled", ScopeType.UNIVERSE, "Enable consistency check for universe", "When enabled, all universe operations will attempt consistency check validation before" @@ -1346,4 +1354,22 @@ public class UniverseConfKeys extends RuntimeConfigKeysModule { "Node agent enabler installation time-out for the universe", ConfDataType.DurationType, ImmutableList.of(ConfKeyTags.PUBLIC)); + public static final ConfKeyInfo consistencyUpdateDelay = + new ConfKeyInfo<>( + "yb.universe.consistency_check.update_delay_secs", + ScopeType.UNIVERSE, + "Delay after updating consistency check information in YBDB", + "Introduces a sleep in the PG update query to help simulate testing with unreliable YSQL" + + " connection", + ConfDataType.LongType, + ImmutableList.of(ConfKeyTags.INTERNAL)); + public static final ConfKeyInfo consistencyCheckPendingTest = + new ConfKeyInfo<>( + "yb.universe.consistency_check.test_pending", + ScopeType.UNIVERSE, + "Test handling of pending consistency check update", + "YBA will shutdown immediately after updating the DB sequence number but before saving to" + + " local universe details", + ConfDataType.BooleanType, + ImmutableList.of(ConfKeyTags.INTERNAL)); } diff --git a/managed/src/main/java/com/yugabyte/yw/models/helpers/CommonUtils.java 
b/managed/src/main/java/com/yugabyte/yw/models/helpers/CommonUtils.java index dc8ce8f21ba9..0ecc963e2b6b 100644 --- a/managed/src/main/java/com/yugabyte/yw/models/helpers/CommonUtils.java +++ b/managed/src/main/java/com/yugabyte/yw/models/helpers/CommonUtils.java @@ -810,7 +810,7 @@ public static NodeDetails getServerToRunYsqlQuery(Universe universe, boolean use .toList(); NodeDetails sameRegionTServer; if (useToBeRemoved) { - sameRegionTServer = getARandomLiveOrRemovedTServer(sameRegionNodes); + sameRegionTServer = getARandomLiveOrToBeRemovedTServer(sameRegionNodes); } else { sameRegionTServer = getARandomLiveTServer(sameRegionNodes); } @@ -820,7 +820,7 @@ public static NodeDetails getServerToRunYsqlQuery(Universe universe, boolean use } } if (useToBeRemoved) { - return getARandomLiveOrRemovedTServer(universe); + return getARandomLiveOrToBeRemovedTServer(universe); } return getARandomLiveTServer(universe); } @@ -837,9 +837,9 @@ private static NodeDetails getARandomLiveTServer(Collection nodes) return tserverLiveNodes.get(new Random().nextInt(tserverLiveNodes.size())); } - public static NodeDetails getARandomLiveOrRemovedTServer(Universe universe) { + public static NodeDetails getARandomLiveOrToBeRemovedTServer(Universe universe) { NodeDetails randomLiveOrRemovedTServer = - getARandomLiveOrRemovedTServer(universe.getTServersInPrimaryCluster()); + getARandomLiveOrToBeRemovedTServer(universe.getTServersInPrimaryCluster()); if (randomLiveOrRemovedTServer == null) { throw new IllegalStateException( "No live or toBeRemoved TServers found for Universe UUID: " + universe.getUniverseUUID()); @@ -847,7 +847,7 @@ public static NodeDetails getARandomLiveOrRemovedTServer(Universe universe) { return randomLiveOrRemovedTServer; } - private static NodeDetails getARandomLiveOrRemovedTServer(Collection nodes) { + private static NodeDetails getARandomLiveOrToBeRemovedTServer(Collection nodes) { List tserverLiveNodes = nodes.stream() .filter(nodeDetails -> nodeDetails.isTserver) diff 
--git a/managed/src/main/resources/reference.conf b/managed/src/main/resources/reference.conf index e85740662326..32d72ca7d55b 100644 --- a/managed/src/main/resources/reference.conf +++ b/managed/src/main/resources/reference.conf @@ -245,8 +245,13 @@ yb { otel_collector_metrics_port = 8889 audit_logging_enabled = false allow_connection_pooling = false - consistency_check_enabled = false default_service_scope_for_k8s="AZ" # possible: Namespaced, AZ + consistency_check { + enabled = true + test_pending = false # NEVER SET IN PROD + update_delay_secs = 0 # NEVER SET IN PROD + ysql_timeout_secs = 30 + } } xcluster { diff --git a/managed/src/test/java/com/yugabyte/yw/commissioner/MockUpgrade.java b/managed/src/test/java/com/yugabyte/yw/commissioner/MockUpgrade.java index f7451cba95a0..43061fa04012 100644 --- a/managed/src/test/java/com/yugabyte/yw/commissioner/MockUpgrade.java +++ b/managed/src/test/java/com/yugabyte/yw/commissioner/MockUpgrade.java @@ -176,10 +176,10 @@ public MockUpgrade precheckTasks(boolean enableYSQL, TaskType... taskTypes) { for (TaskType taskType : taskTypes) { addTask(taskType, null); } - addTask(TaskType.FreezeUniverse, null); if (enableYSQL) { addTask(TaskType.UpdateConsistencyCheck, null); } + addTask(TaskType.FreezeUniverse, null); addHookTasks(true); return this; } diff --git a/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/AddNodeToUniverseTest.java b/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/AddNodeToUniverseTest.java index 83ea33d1395a..8d52ecaeda93 100644 --- a/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/AddNodeToUniverseTest.java +++ b/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/AddNodeToUniverseTest.java @@ -194,8 +194,8 @@ private TaskInfo submitTask(UUID universeUUID, Provider provider, String nodeNam ImmutableList.of( TaskType.InstanceExistCheck, // only if it wasn't decommissioned. 
TaskType.CheckLeaderlessTablets, - TaskType.FreezeUniverse, TaskType.UpdateConsistencyCheck, + TaskType.FreezeUniverse, TaskType.SetNodeState, // to Adding TaskType.SetNodeStatus, // to Adding for 'To Be Added' TaskType.AnsibleCreateServer, @@ -252,8 +252,8 @@ private TaskInfo submitTask(UUID universeUUID, Provider provider, String nodeNam ImmutableList.of( TaskType.CheckLeaderlessTablets, TaskType.PreflightNodeCheck, - TaskType.FreezeUniverse, TaskType.UpdateConsistencyCheck, + TaskType.FreezeUniverse, TaskType.SetNodeState, TaskType.SetNodeStatus, TaskType.AnsibleCreateServer, @@ -310,8 +310,8 @@ private TaskInfo submitTask(UUID universeUUID, Provider provider, String nodeNam ImmutableList.of( TaskType.InstanceExistCheck, TaskType.CheckLeaderlessTablets, - TaskType.FreezeUniverse, TaskType.UpdateConsistencyCheck, + TaskType.FreezeUniverse, TaskType.SetNodeState, TaskType.SetNodeStatus, TaskType.AnsibleCreateServer, diff --git a/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/DeleteNodeFromUniverseTest.java b/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/DeleteNodeFromUniverseTest.java index a68f02833ffd..b3e25bdd6246 100644 --- a/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/DeleteNodeFromUniverseTest.java +++ b/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/DeleteNodeFromUniverseTest.java @@ -50,8 +50,8 @@ public class DeleteNodeFromUniverseTest extends CommissionerBaseTest { private static final List DELETE_NODE_TASK_SEQUENCE_WITH_INSTANCE = ImmutableList.of( - TaskType.FreezeUniverse, TaskType.UpdateConsistencyCheck, + TaskType.FreezeUniverse, TaskType.CheckNodeSafeToDelete, TaskType.AnsibleDestroyServer, TaskType.DeleteNode, @@ -59,8 +59,8 @@ public class DeleteNodeFromUniverseTest extends CommissionerBaseTest { private static final List DELETE_NODE_TASK_SEQUENCE_WITHOUT_INSTANCE = ImmutableList.of( - TaskType.FreezeUniverse, TaskType.UpdateConsistencyCheck, + TaskType.FreezeUniverse, TaskType.RemoveNodeAgent, 
TaskType.DeleteNode, TaskType.UniverseUpdateSucceeded); diff --git a/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/DeleteXClusterConfigTest.java b/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/DeleteXClusterConfigTest.java index cacc70f1c48f..b2f8657100d9 100644 --- a/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/DeleteXClusterConfigTest.java +++ b/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/DeleteXClusterConfigTest.java @@ -87,9 +87,7 @@ public class DeleteXClusterConfigTest extends CommissionerBaseTest { List DELETE_XCLUSTER_CONFIG_TASK_SEQUENCE = ImmutableList.of( TaskType.FreezeUniverse, - TaskType.UpdateConsistencyCheck, TaskType.FreezeUniverse, - TaskType.UpdateConsistencyCheck, TaskType.XClusterConfigSetStatus, TaskType.DeleteReplication, TaskType.DeleteBootstrapIds, @@ -382,7 +380,7 @@ public void testDeleteXClusterFailure() { assertNotNull(subtaskGroup); assertEquals(DELETE_XCLUSTER_CONFIG_TASK_SEQUENCE.get(i), subtaskGroup.getTaskType()); } - String taskErrMsg = taskInfo.getSubTasks().get(5).getErrorMessage(); + String taskErrMsg = taskInfo.getSubTasks().get(3).getErrorMessage(); String expectedErrMsg = String.format( "Failed to delete replication for XClusterConfig(%s): %s", diff --git a/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/DestroyKubernetesUniverseTest.java b/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/DestroyKubernetesUniverseTest.java index fcfb8316c086..db00917d0f88 100644 --- a/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/DestroyKubernetesUniverseTest.java +++ b/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/DestroyKubernetesUniverseTest.java @@ -191,8 +191,6 @@ private void assertTaskSequence( // Shift by 1 subtask due to FreezeUniverse. 
assertEquals( TaskType.FreezeUniverse, subTasksByPosition.get(position++).get(0).getTaskType()); - assertEquals( - TaskType.UpdateConsistencyCheck, subTasksByPosition.get(position++).get(0).getTaskType()); } for (int i = 0; i < KUBERNETES_DESTROY_UNIVERSE_TASKS.size(); i++) { TaskType taskType = KUBERNETES_DESTROY_UNIVERSE_TASKS.get(i); diff --git a/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/EditKubernetesUniverseTest.java b/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/EditKubernetesUniverseTest.java index aaeeab04a16d..7760948cfc3d 100644 --- a/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/EditKubernetesUniverseTest.java +++ b/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/EditKubernetesUniverseTest.java @@ -214,7 +214,6 @@ private void setupUniverseMultiAZ(boolean setMasters, int numTservers) { ImmutableList.of( TaskType.CheckLeaderlessTablets, TaskType.FreezeUniverse, - TaskType.UpdateConsistencyCheck, TaskType.HandleKubernetesNamespacedServices, TaskType.KubernetesCommandExecutor, TaskType.KubernetesCheckNumPod, @@ -232,7 +231,6 @@ private List getExpectedAddPodTaskResults() { Json.toJson(ImmutableMap.of()), Json.toJson(ImmutableMap.of()), Json.toJson(ImmutableMap.of()), - Json.toJson(ImmutableMap.of()), Json.toJson(ImmutableMap.of("commandType", HELM_UPGRADE.name())), Json.toJson(ImmutableMap.of("commandType", WAIT_FOR_PODS.name())), Json.toJson(ImmutableMap.of("commandType", POD_INFO.name())), @@ -249,7 +247,6 @@ private List getExpectedAddPodTaskResults() { ImmutableList.of( TaskType.CheckLeaderlessTablets, TaskType.FreezeUniverse, - TaskType.UpdateConsistencyCheck, TaskType.HandleKubernetesNamespacedServices, TaskType.UpdatePlacementInfo, TaskType.WaitForDataMove, @@ -272,7 +269,6 @@ private List getExpectedRemovePodTaskResults() { Json.toJson(ImmutableMap.of()), Json.toJson(ImmutableMap.of()), Json.toJson(ImmutableMap.of()), - Json.toJson(ImmutableMap.of()), Json.toJson(ImmutableMap.of("commandType", 
HELM_UPGRADE.name())), Json.toJson(ImmutableMap.of("commandType", WAIT_FOR_PODS.name())), Json.toJson(ImmutableMap.of()), @@ -288,7 +284,6 @@ private List getExpectedRemovePodTaskResults() { ImmutableList.of( TaskType.CheckLeaderlessTablets, TaskType.FreezeUniverse, - TaskType.UpdateConsistencyCheck, TaskType.HandleKubernetesNamespacedServices, TaskType.UpdatePlacementInfo, TaskType.CheckUnderReplicatedTablets, @@ -325,7 +320,6 @@ private List getExpectedChangeInstaceTypeResults() { Json.toJson(ImmutableMap.of()), Json.toJson(ImmutableMap.of()), Json.toJson(ImmutableMap.of()), - Json.toJson(ImmutableMap.of()), Json.toJson(ImmutableMap.of("commandType", HELM_UPGRADE.name())), Json.toJson(ImmutableMap.of("commandType", WAIT_FOR_POD.name())), Json.toJson(ImmutableMap.of()), diff --git a/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/EditUniverseTest.java b/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/EditUniverseTest.java index 17219e4f1537..540439d7351b 100644 --- a/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/EditUniverseTest.java +++ b/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/EditUniverseTest.java @@ -75,8 +75,8 @@ public class EditUniverseTest extends UniverseModifyBaseTest { private static final List UNIVERSE_EXPAND_TASK_SEQUENCE = ImmutableList.of( TaskType.CheckLeaderlessTablets, - TaskType.FreezeUniverse, TaskType.UpdateConsistencyCheck, + TaskType.FreezeUniverse, TaskType.SetNodeStatus, // ToBeAdded to Adding TaskType.AnsibleCreateServer, TaskType.AnsibleUpdateNodeInfo, @@ -125,8 +125,8 @@ public class EditUniverseTest extends UniverseModifyBaseTest { ImmutableList.of( TaskType.CheckLeaderlessTablets, TaskType.PreflightNodeCheck, - TaskType.FreezeUniverse, TaskType.UpdateConsistencyCheck, + TaskType.FreezeUniverse, TaskType.SetNodeStatus, // ToBeAdded to Adding TaskType.AnsibleCreateServer, TaskType.AnsibleUpdateNodeInfo, diff --git 
a/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/EditXClusterConfigTest.java b/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/EditXClusterConfigTest.java index 8e289d9401b3..0d96a0aaae0a 100644 --- a/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/EditXClusterConfigTest.java +++ b/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/EditXClusterConfigTest.java @@ -114,11 +114,11 @@ public class EditXClusterConfigTest extends CommissionerBaseTest { List RENAME_FAILURE_TASK_SEQUENCE = ImmutableList.of( // Freeze for source. - TaskType.FreezeUniverse, TaskType.UpdateConsistencyCheck, - // Freeze for target. TaskType.FreezeUniverse, + // Freeze for target. TaskType.UpdateConsistencyCheck, + TaskType.FreezeUniverse, TaskType.XClusterConfigSetStatus, TaskType.XClusterConfigRename, TaskType.XClusterConfigSetStatus, @@ -128,11 +128,11 @@ public class EditXClusterConfigTest extends CommissionerBaseTest { List ADD_TABLE_IS_ALTER_DONE_FAILURE = ImmutableList.of( // Freeze for source. - TaskType.FreezeUniverse, TaskType.UpdateConsistencyCheck, - // Freeze for target. TaskType.FreezeUniverse, + // Freeze for target. 
TaskType.UpdateConsistencyCheck, + TaskType.FreezeUniverse, TaskType.XClusterConfigSetStatus, TaskType.XClusterConfigSetStatusForTables, TaskType.BootstrapProducer, diff --git a/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/PauseUniverseTest.java b/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/PauseUniverseTest.java index aaf69ccfb1be..4c1ff896f10c 100644 --- a/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/PauseUniverseTest.java +++ b/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/PauseUniverseTest.java @@ -103,7 +103,6 @@ private void setupUniverse(boolean updateInProgress) { private static final List PAUSE_UNIVERSE_TASKS = ImmutableList.of( TaskType.FreezeUniverse, - TaskType.UpdateConsistencyCheck, TaskType.SetNodeState, TaskType.AnsibleClusterServerCtl, TaskType.SetNodeState, @@ -115,7 +114,6 @@ private void setupUniverse(boolean updateInProgress) { private static final List PAUSE_UNIVERSE_EXPECTED_RESULTS = ImmutableList.of( - Json.toJson(ImmutableMap.of()), Json.toJson(ImmutableMap.of()), Json.toJson(ImmutableMap.of()), Json.toJson(ImmutableMap.of("process", "tserver", "command", "stop")), diff --git a/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/ReadOnlyClusterCreateTest.java b/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/ReadOnlyClusterCreateTest.java index f143c312cde8..d7d4680353dc 100644 --- a/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/ReadOnlyClusterCreateTest.java +++ b/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/ReadOnlyClusterCreateTest.java @@ -86,8 +86,8 @@ private TaskInfo submitTask(UniverseDefinitionTaskParams taskParams) { private static final List CLUSTER_CREATE_TASK_SEQUENCE = ImmutableList.of( TaskType.CheckLeaderlessTablets, - TaskType.FreezeUniverse, TaskType.UpdateConsistencyCheck, + TaskType.FreezeUniverse, TaskType.SetNodeStatus, TaskType.AnsibleCreateServer, TaskType.AnsibleUpdateNodeInfo, diff --git 
a/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/ReadOnlyClusterDeleteTest.java b/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/ReadOnlyClusterDeleteTest.java index 5d4a55100692..c66d96f85783 100644 --- a/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/ReadOnlyClusterDeleteTest.java +++ b/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/ReadOnlyClusterDeleteTest.java @@ -132,7 +132,6 @@ private Cluster addReadReplica(Region region) { private static final List CLUSTER_DELETE_TASK_SEQUENCE = ImmutableList.of( TaskType.FreezeUniverse, - TaskType.UpdateConsistencyCheck, TaskType.CheckLeaderlessTablets, TaskType.SetNodeState, TaskType.AnsibleDestroyServer, diff --git a/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/RebootNodeInUniverseTest.java b/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/RebootNodeInUniverseTest.java index 6265f8715bf0..279599b19eb7 100644 --- a/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/RebootNodeInUniverseTest.java +++ b/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/RebootNodeInUniverseTest.java @@ -108,8 +108,8 @@ public void setUp(boolean withMaster, int numNodes, int replicationFactor) { private List rebootNodeTaskSequence(boolean isHardReboot) { return ImmutableList.of( TaskType.CheckLeaderlessTablets, - TaskType.FreezeUniverse, TaskType.UpdateConsistencyCheck, + TaskType.FreezeUniverse, TaskType.SetNodeState, TaskType.AnsibleClusterServerCtl, isHardReboot ? 
TaskType.HardRebootServer : TaskType.RebootServer, @@ -139,8 +139,8 @@ private List rebootNodeTaskExpectedResults(boolean isHardReboot) { private List rebootNodeWithMaster(boolean isHardReboot) { return ImmutableList.of( TaskType.CheckLeaderlessTablets, - TaskType.FreezeUniverse, TaskType.UpdateConsistencyCheck, + TaskType.FreezeUniverse, TaskType.SetNodeState, TaskType.AnsibleClusterServerCtl, TaskType.AnsibleClusterServerCtl, @@ -180,8 +180,8 @@ private List rebootNodeWithMasterResults(boolean isHardReboot) { private List rebootNodeWithOnlyMaster(boolean isHardReboot) { return ImmutableList.of( TaskType.CheckLeaderlessTablets, - TaskType.FreezeUniverse, TaskType.UpdateConsistencyCheck, + TaskType.FreezeUniverse, TaskType.SetNodeState, TaskType.AnsibleClusterServerCtl, TaskType.WaitForMasterLeader, diff --git a/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/ReleaseInstanceFromUniverseTest.java b/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/ReleaseInstanceFromUniverseTest.java index 5ab3e42d223c..4841ee03151f 100644 --- a/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/ReleaseInstanceFromUniverseTest.java +++ b/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/ReleaseInstanceFromUniverseTest.java @@ -153,8 +153,8 @@ private TaskInfo submitTask(NodeTaskParams taskParams, String nodeName, int vers private static final List RELEASE_INSTANCE_TASK_SEQUENCE = ImmutableList.of( TaskType.CheckNodeSafeToDelete, - TaskType.FreezeUniverse, TaskType.UpdateConsistencyCheck, + TaskType.FreezeUniverse, TaskType.SetNodeState, TaskType.WaitForMasterLeader, TaskType.SetNodeState, diff --git a/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/RemoveNodeFromUniverseTest.java b/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/RemoveNodeFromUniverseTest.java index 782cd4e6b3cf..4514afdb5150 100644 --- a/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/RemoveNodeFromUniverseTest.java +++ 
b/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/RemoveNodeFromUniverseTest.java @@ -173,8 +173,8 @@ private TaskInfo submitTask(NodeTaskParams taskParams, String nodeName) { private static final List REMOVE_NODE_TASK_SEQUENCE = ImmutableList.of( TaskType.CheckLeaderlessTablets, - TaskType.FreezeUniverse, TaskType.UpdateConsistencyCheck, + TaskType.FreezeUniverse, TaskType.SetNodeState, TaskType.UpdatePlacementInfo, TaskType.WaitForDataMove, @@ -201,8 +201,8 @@ private TaskInfo submitTask(NodeTaskParams taskParams, String nodeName) { private static final List REMOVE_NODE_WITH_MASTER_REPLACE = ImmutableList.of( TaskType.CheckLeaderlessTablets, - TaskType.FreezeUniverse, TaskType.UpdateConsistencyCheck, + TaskType.FreezeUniverse, TaskType.SetNodeState, TaskType.UpdatePlacementInfo, TaskType.WaitForDataMove, @@ -271,8 +271,8 @@ private TaskInfo submitTask(NodeTaskParams taskParams, String nodeName) { private static final List REMOVE_NODE_WITH_MASTER = ImmutableList.of( TaskType.CheckLeaderlessTablets, - TaskType.FreezeUniverse, TaskType.UpdateConsistencyCheck, + TaskType.FreezeUniverse, TaskType.SetNodeState, TaskType.UpdatePlacementInfo, TaskType.WaitForDataMove, diff --git a/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/ResumeUniverseTest.java b/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/ResumeUniverseTest.java index 4f661720d434..939d416153e2 100644 --- a/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/ResumeUniverseTest.java +++ b/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/ResumeUniverseTest.java @@ -113,7 +113,6 @@ private void setupUniverse(boolean updateInProgress, int numOfNodes) { private static final List RESUME_UNIVERSE_TASKS = ImmutableList.of( TaskType.FreezeUniverse, - TaskType.UpdateConsistencyCheck, TaskType.ResumeServer, TaskType.WaitForClockSync, // Ensure clock skew is low enough TaskType.AnsibleClusterServerCtl, @@ -131,7 +130,6 @@ private void setupUniverse(boolean updateInProgress, int 
numOfNodes) { private static final List RESUME_ENCRYPTION_AT_REST_UNIVERSE_TASKS = ImmutableList.of( TaskType.FreezeUniverse, - TaskType.UpdateConsistencyCheck, TaskType.ResumeServer, TaskType.WaitForClockSync, // Ensure clock skew is low enough TaskType.AnsibleClusterServerCtl, @@ -152,7 +150,6 @@ private void setupUniverse(boolean updateInProgress, int numOfNodes) { Json.toJson(ImmutableMap.of()), Json.toJson(ImmutableMap.of()), Json.toJson(ImmutableMap.of()), - Json.toJson(ImmutableMap.of()), Json.toJson(ImmutableMap.of("process", "master", "command", "start")), Json.toJson(ImmutableMap.of()), Json.toJson(ImmutableMap.of()), @@ -170,7 +167,6 @@ private void setupUniverse(boolean updateInProgress, int numOfNodes) { Json.toJson(ImmutableMap.of()), Json.toJson(ImmutableMap.of()), Json.toJson(ImmutableMap.of()), - Json.toJson(ImmutableMap.of()), Json.toJson(ImmutableMap.of("process", "master", "command", "start")), Json.toJson(ImmutableMap.of()), Json.toJson(ImmutableMap.of()), diff --git a/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/StartMasterOnNodeTest.java b/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/StartMasterOnNodeTest.java index a8533160be96..8faa7e02be6b 100644 --- a/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/StartMasterOnNodeTest.java +++ b/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/StartMasterOnNodeTest.java @@ -153,8 +153,8 @@ private TaskInfo submitTask(NodeTaskParams taskParams, String nodeName) { private static final List START_MASTER_TASK_SEQUENCE = ImmutableList.of( TaskType.CheckLeaderlessTablets, - TaskType.FreezeUniverse, TaskType.UpdateConsistencyCheck, + TaskType.FreezeUniverse, TaskType.SetNodeState, TaskType.WaitForClockSync, // Ensure clock skew is low enough TaskType.AnsibleConfigureServers, diff --git a/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/StartNodeInUniverseTest.java b/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/StartNodeInUniverseTest.java index 
e944a5cf7674..b4280fa029b3 100644 --- a/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/StartNodeInUniverseTest.java +++ b/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/StartNodeInUniverseTest.java @@ -173,8 +173,8 @@ private Universe setMasters(Universe universe, String... nodeNames) { List START_NODE_TASK_SEQUENCE = ImmutableList.of( - TaskType.FreezeUniverse, TaskType.UpdateConsistencyCheck, + TaskType.FreezeUniverse, TaskType.SetNodeState, TaskType.WaitForClockSync, // Ensure clock skew is low enough TaskType.AnsibleConfigureServers, @@ -203,8 +203,8 @@ private Universe setMasters(Universe universe, String... nodeNames) { List WITH_MASTER_UNDER_REPLICATED = ImmutableList.of( - TaskType.FreezeUniverse, TaskType.UpdateConsistencyCheck, + TaskType.FreezeUniverse, TaskType.SetNodeState, TaskType.WaitForClockSync, // Ensure clock skew is low enough TaskType.AnsibleConfigureServers, diff --git a/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/StopNodeInUniverseTest.java b/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/StopNodeInUniverseTest.java index 0997cd303508..32a6b70be32e 100644 --- a/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/StopNodeInUniverseTest.java +++ b/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/StopNodeInUniverseTest.java @@ -164,8 +164,8 @@ private TaskInfo submitTask(NodeTaskParams taskParams, String nodeName) { TaskType.CheckUnderReplicatedTablets, TaskType.CheckNodesAreSafeToTakeDown, TaskType.CheckLeaderlessTablets, - TaskType.FreezeUniverse, TaskType.UpdateConsistencyCheck, + TaskType.FreezeUniverse, TaskType.ModifyBlackList, TaskType.SetNodeState, TaskType.ModifyBlackList, @@ -200,8 +200,8 @@ private TaskInfo submitTask(NodeTaskParams taskParams, String nodeName) { TaskType.CheckUnderReplicatedTablets, TaskType.CheckNodesAreSafeToTakeDown, TaskType.CheckLeaderlessTablets, - TaskType.FreezeUniverse, TaskType.UpdateConsistencyCheck, + TaskType.FreezeUniverse, 
TaskType.ModifyBlackList, TaskType.SetNodeState, TaskType.ModifyBlackList, @@ -238,8 +238,8 @@ private TaskInfo submitTask(NodeTaskParams taskParams, String nodeName) { TaskType.CheckUnderReplicatedTablets, TaskType.CheckNodesAreSafeToTakeDown, TaskType.CheckLeaderlessTablets, - TaskType.FreezeUniverse, TaskType.UpdateConsistencyCheck, + TaskType.FreezeUniverse, TaskType.ModifyBlackList, TaskType.SetNodeState, TaskType.ModifyBlackList, @@ -292,8 +292,8 @@ private TaskInfo submitTask(NodeTaskParams taskParams, String nodeName) { TaskType.CheckUnderReplicatedTablets, TaskType.CheckNodesAreSafeToTakeDown, TaskType.CheckLeaderlessTablets, - TaskType.FreezeUniverse, TaskType.UpdateConsistencyCheck, + TaskType.FreezeUniverse, TaskType.ModifyBlackList, TaskType.SetNodeState, TaskType.ModifyBlackList, @@ -347,8 +347,8 @@ private TaskInfo submitTask(NodeTaskParams taskParams, String nodeName) { ImmutableList.of( TaskType.CheckNodesAreSafeToTakeDown, TaskType.CheckLeaderlessTablets, - TaskType.FreezeUniverse, TaskType.UpdateConsistencyCheck, + TaskType.FreezeUniverse, TaskType.SetNodeState, TaskType.ChangeMasterConfig, TaskType.AnsibleClusterServerCtl, diff --git a/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/UpgradeKubernetesUniverseTest.java b/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/UpgradeKubernetesUniverseTest.java index 3c675c6b36d3..e93c71f9f97c 100644 --- a/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/UpgradeKubernetesUniverseTest.java +++ b/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/UpgradeKubernetesUniverseTest.java @@ -243,8 +243,8 @@ private void setupUniverseMultiAZ(boolean setMasters) { private static final List KUBERNETES_UPGRADE_SOFTWARE_TASKS = ImmutableList.of( - TaskType.FreezeUniverse, TaskType.UpdateConsistencyCheck, + TaskType.FreezeUniverse, TaskType.KubernetesCommandExecutor, TaskType.CheckNodesAreSafeToTakeDown, TaskType.KubernetesCommandExecutor, @@ -299,8 +299,8 @@ private void 
setupUniverseMultiAZ(boolean setMasters) { private static final List KUBERNETES_UPGRADE_GFLAG_TASKS = ImmutableList.of( - TaskType.FreezeUniverse, TaskType.UpdateConsistencyCheck, + TaskType.FreezeUniverse, TaskType.UpdateAndPersistGFlags, TaskType.KubernetesCommandExecutor, TaskType.CheckNodesAreSafeToTakeDown, diff --git a/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/local/LocalProviderUniverseTestBase.java b/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/local/LocalProviderUniverseTestBase.java index 58f4db5128a3..d1d414fe77bf 100644 --- a/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/local/LocalProviderUniverseTestBase.java +++ b/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/local/LocalProviderUniverseTestBase.java @@ -24,6 +24,7 @@ import com.yugabyte.yw.commissioner.tasks.UniverseTaskBase.ServerType; import com.yugabyte.yw.commissioner.tasks.subtasks.CheckClusterConsistency; import com.yugabyte.yw.common.ApiUtils; +import com.yugabyte.yw.common.ConfigHelper; import com.yugabyte.yw.common.LocalNodeManager; import com.yugabyte.yw.common.LocalNodeUniverseManager; import com.yugabyte.yw.common.ModelFactory; @@ -432,7 +433,7 @@ public void setUp() { settableRuntimeConfigFactory.globalRuntimeConf().setValue("yb.releases.use_redesign", "false"); settableRuntimeConfigFactory .globalRuntimeConf() - .setValue("yb.universe.consistency_check_enabled", "true"); + .setValue("yb.universe.consistency_check.enabled", "true"); Pair ipRange = getIpRange(); localNodeManager.setIpRangeStart(ipRange.getFirst()); localNodeManager.setIpRangeEnd(ipRange.getSecond()); @@ -455,6 +456,11 @@ public void setUp() { ReleaseManager.YBC_CONFIG_TYPE.name(), getMetadataJson("ybc-" + YBC_VERSION, true), "release"); + ObjectNode ywMetadata = Json.newObject(); + ywMetadata.put("yugaware_uuid", UUID.randomUUID().toString()); + ywMetadata.put("version", ybVersion); + YugawareProperty.addConfigProperty( + ConfigHelper.ConfigType.YugawareMetadata.name(), 
ywMetadata, "Yugaware Metadata"); customer = ModelFactory.testCustomer(); user = ModelFactory.testUser(customer); diff --git a/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/local/UpdateConsistencyLocalTest.java b/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/local/UpdateConsistencyLocalTest.java index ed2fce242cfe..1caf8360bc01 100644 --- a/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/local/UpdateConsistencyLocalTest.java +++ b/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/local/UpdateConsistencyLocalTest.java @@ -2,7 +2,7 @@ package com.yugabyte.yw.commissioner.tasks.local; -import static com.yugabyte.yw.common.Util.CONSISTENCY_CHECK; +import static com.yugabyte.yw.common.Util.CONSISTENCY_CHECK_TABLE_NAME; import static com.yugabyte.yw.common.Util.SYSTEM_PLATFORM_DB; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.containsString; @@ -40,7 +40,7 @@ public void testUpdateStale() throws InterruptedException { details, universe, SYSTEM_PLATFORM_DB, - String.format("update %s set seq_num = 10", CONSISTENCY_CHECK), + String.format("update %s set seq_num = 10", CONSISTENCY_CHECK_TABLE_NAME), 10); assertTrue(ysqlResponse.isSuccess()); UniverseDefinitionTaskParams.Cluster cluster = @@ -59,6 +59,6 @@ public void testUpdateStale() throws InterruptedException { TaskInfo taskInfo = waitForTask(taskID, universe); assertEquals(TaskInfo.State.Failure, taskInfo.getTaskState()); String error = getAllErrorsStr(taskInfo); - assertThat(error, containsString("stale metadata")); + assertThat(error, containsString("stale universe metadata")); } } diff --git a/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/upgrade/CertsRotateTest.java b/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/upgrade/CertsRotateTest.java index 9096eb44077d..75539344677c 100644 --- a/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/upgrade/CertsRotateTest.java +++ 
b/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/upgrade/CertsRotateTest.java @@ -517,8 +517,8 @@ public void testCertsRotateNonRollingUpgrade( subTasks.stream().collect(Collectors.groupingBy(TaskInfo::getPosition)); int position = 0; - assertTaskType(subTasksByPosition.get(position++), TaskType.FreezeUniverse); assertTaskType(subTasksByPosition.get(position++), TaskType.UpdateConsistencyCheck); + assertTaskType(subTasksByPosition.get(position++), TaskType.FreezeUniverse); // RootCA update task int expectedPosition = 16; if (rotateRootCA) { @@ -626,8 +626,8 @@ public void testCertsRotateRollingUpgrade( int expectedPosition = 81; int expectedNumberOfInvocations = 21; assertTaskType(subTasksByPosition.get(position++), TaskType.CheckNodesAreSafeToTakeDown); - assertTaskType(subTasksByPosition.get(position++), TaskType.FreezeUniverse); assertTaskType(subTasksByPosition.get(position++), TaskType.UpdateConsistencyCheck); + assertTaskType(subTasksByPosition.get(position++), TaskType.FreezeUniverse); if (rotateRootCA) { expectedPosition += 150; expectedNumberOfInvocations += 30; @@ -739,8 +739,8 @@ public void testCertsRotateSelfSignedServerCert( if (isRolling) { assertTaskType(subTasksByPosition.get(position++), TaskType.CheckNodesAreSafeToTakeDown); } - assertTaskType(subTasksByPosition.get(position++), TaskType.FreezeUniverse); assertTaskType(subTasksByPosition.get(position++), TaskType.UpdateConsistencyCheck); + assertTaskType(subTasksByPosition.get(position++), TaskType.FreezeUniverse); // RootCA update task int expectedPosition = isRolling ? 
81 : 17; // Cert update tasks diff --git a/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/upgrade/FinalizeUpgradeTest.java b/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/upgrade/FinalizeUpgradeTest.java index 84addefafd80..d4f715690bff 100644 --- a/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/upgrade/FinalizeUpgradeTest.java +++ b/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/upgrade/FinalizeUpgradeTest.java @@ -80,8 +80,8 @@ public void testFinalizeWithUpgradeSystemCatalog() throws Exception { subTasks.stream().collect(Collectors.groupingBy(TaskInfo::getPosition)); assertEquals(7, subTasks.size()); int position = 0; - assertTaskType(subTasksByPosition.get(position++), TaskType.FreezeUniverse); assertTaskType(subTasksByPosition.get(position++), TaskType.UpdateConsistencyCheck); + assertTaskType(subTasksByPosition.get(position++), TaskType.FreezeUniverse); assertTaskType(subTasksByPosition.get(position++), TaskType.UpdateUniverseState); assertTaskType(subTasksByPosition.get(position++), TaskType.RunYsqlUpgrade); assertTaskType(subTasksByPosition.get(position++), TaskType.PromoteAutoFlags); @@ -108,8 +108,8 @@ public void testFinalizeWithNoSystemCatalog() { subTasks.stream().collect(Collectors.groupingBy(TaskInfo::getPosition)); assertEquals(6, subTasks.size()); int position = 0; - assertTaskType(subTasksByPosition.get(position++), TaskType.FreezeUniverse); assertTaskType(subTasksByPosition.get(position++), TaskType.UpdateConsistencyCheck); + assertTaskType(subTasksByPosition.get(position++), TaskType.FreezeUniverse); assertTaskType(subTasksByPosition.get(position++), TaskType.UpdateUniverseState); assertTaskType(subTasksByPosition.get(position++), TaskType.PromoteAutoFlags); assertTaskType(subTasksByPosition.get(position++), TaskType.UpdateUniverseState); diff --git a/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/upgrade/GFlagsKubernetesUpgradeTest.java 
b/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/upgrade/GFlagsKubernetesUpgradeTest.java index aa3b8b0dd491..2014688cecca 100644 --- a/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/upgrade/GFlagsKubernetesUpgradeTest.java +++ b/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/upgrade/GFlagsKubernetesUpgradeTest.java @@ -38,8 +38,8 @@ public class GFlagsKubernetesUpgradeTest extends KubernetesUpgradeTaskTest { private static final List UPGRADE_TASK_SEQUENCE = ImmutableList.of( TaskType.CheckNodesAreSafeToTakeDown, - TaskType.FreezeUniverse, TaskType.UpdateConsistencyCheck, + TaskType.FreezeUniverse, TaskType.KubernetesCommandExecutor, TaskType.CheckNodesAreSafeToTakeDown, TaskType.KubernetesCommandExecutor, diff --git a/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/upgrade/RollbackKubernetesUpgradeTest.java b/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/upgrade/RollbackKubernetesUpgradeTest.java index daefc9dbbba4..8f1c7f04d510 100644 --- a/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/upgrade/RollbackKubernetesUpgradeTest.java +++ b/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/upgrade/RollbackKubernetesUpgradeTest.java @@ -46,8 +46,8 @@ public class RollbackKubernetesUpgradeTest extends KubernetesUpgradeTaskTest { private static final List UPGRADE_TASK_SEQUENCE = ImmutableList.of( TaskType.CheckNodesAreSafeToTakeDown, - TaskType.FreezeUniverse, TaskType.UpdateConsistencyCheck, + TaskType.FreezeUniverse, TaskType.UpdateUniverseState, TaskType.RollbackAutoFlags, TaskType.KubernetesCommandExecutor, diff --git a/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/upgrade/RollbackUpgradeTest.java b/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/upgrade/RollbackUpgradeTest.java index 106921197de7..d72e6bfd1c3a 100644 --- a/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/upgrade/RollbackUpgradeTest.java +++ 
b/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/upgrade/RollbackUpgradeTest.java @@ -320,8 +320,8 @@ public void testRollbackUpgradeInRollingManner() { int position = 0; assertTaskType(subTasksByPosition.get(position++), TaskType.CheckNodesAreSafeToTakeDown); - assertTaskType(subTasksByPosition.get(position++), TaskType.FreezeUniverse); assertTaskType(subTasksByPosition.get(position++), TaskType.UpdateConsistencyCheck); + assertTaskType(subTasksByPosition.get(position++), TaskType.FreezeUniverse); assertTaskType(subTasksByPosition.get(position++), TaskType.UpdateUniverseState); assertTaskType(subTasksByPosition.get(position++), TaskType.RollbackAutoFlags); @@ -362,8 +362,8 @@ public void testRollbackUpgradeInNonRollingManner() { subTasks.stream().collect(Collectors.groupingBy(TaskInfo::getPosition)); int position = 0; - assertTaskType(subTasksByPosition.get(position++), TaskType.FreezeUniverse); assertTaskType(subTasksByPosition.get(position++), TaskType.UpdateConsistencyCheck); + assertTaskType(subTasksByPosition.get(position++), TaskType.FreezeUniverse); assertTaskType(subTasksByPosition.get(position++), TaskType.UpdateUniverseState); assertTaskType(subTasksByPosition.get(position++), TaskType.RollbackAutoFlags); @@ -406,8 +406,8 @@ public void testRollbackPartialUpgrade() { int position = 0; assertTaskType(subTasksByPosition.get(position++), TaskType.CheckNodesAreSafeToTakeDown); - assertTaskType(subTasksByPosition.get(position++), TaskType.FreezeUniverse); assertTaskType(subTasksByPosition.get(position++), TaskType.UpdateConsistencyCheck); + assertTaskType(subTasksByPosition.get(position++), TaskType.FreezeUniverse); assertTaskType(subTasksByPosition.get(position++), TaskType.UpdateUniverseState); assertTaskType(subTasksByPosition.get(position++), TaskType.RollbackAutoFlags); List downloadTasks = subTasksByPosition.get(position++); diff --git a/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/upgrade/SoftwareKubernetesUpgradeTest.java 
b/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/upgrade/SoftwareKubernetesUpgradeTest.java index 360807e35f4c..b9bd5ca29936 100644 --- a/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/upgrade/SoftwareKubernetesUpgradeTest.java +++ b/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/upgrade/SoftwareKubernetesUpgradeTest.java @@ -51,8 +51,8 @@ public class SoftwareKubernetesUpgradeTest extends KubernetesUpgradeTaskTest { ImmutableList.of( TaskType.CheckNodesAreSafeToTakeDown, TaskType.CheckUpgrade, - TaskType.FreezeUniverse, TaskType.UpdateConsistencyCheck, + TaskType.FreezeUniverse, TaskType.KubernetesCommandExecutor, TaskType.CheckNodesAreSafeToTakeDown, TaskType.KubernetesCommandExecutor, diff --git a/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/upgrade/SoftwareKubernetesUpgradeYBTest.java b/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/upgrade/SoftwareKubernetesUpgradeYBTest.java index d952eaa85770..46d32e627596 100644 --- a/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/upgrade/SoftwareKubernetesUpgradeYBTest.java +++ b/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/upgrade/SoftwareKubernetesUpgradeYBTest.java @@ -56,8 +56,8 @@ public class SoftwareKubernetesUpgradeYBTest extends KubernetesUpgradeTaskTest { ImmutableList.of( TaskType.CheckNodesAreSafeToTakeDown, TaskType.CheckUpgrade, - TaskType.FreezeUniverse, TaskType.UpdateConsistencyCheck, + TaskType.FreezeUniverse, TaskType.UpdateUniverseState, TaskType.KubernetesCommandExecutor, TaskType.CheckNodesAreSafeToTakeDown, diff --git a/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/upgrade/TlsToggleTest.java b/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/upgrade/TlsToggleTest.java index 07bfc81774f3..08427fc07e65 100644 --- a/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/upgrade/TlsToggleTest.java +++ b/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/upgrade/TlsToggleTest.java @@ -500,8 +500,8 @@ 
public void testTlsNonRollingUpgrade( subTasks.stream().collect(Collectors.groupingBy(TaskInfo::getPosition)); int position = 0; - assertTaskType(subTasksByPosition.get(position++), TaskType.FreezeUniverse); assertTaskType(subTasksByPosition.get(position++), TaskType.UpdateConsistencyCheck); + assertTaskType(subTasksByPosition.get(position++), TaskType.FreezeUniverse); if (taskParams.enableNodeToNodeEncrypt || taskParams.enableClientToNodeEncrypt) { // Cert update tasks will be non rolling List certUpdateTasks = subTasksByPosition.get(position++); @@ -656,8 +656,8 @@ public void testTlsRollingUpgrade( subTasks.stream().collect(Collectors.groupingBy(TaskInfo::getPosition)); int position = 0; - assertTaskType(subTasksByPosition.get(position++), TaskType.FreezeUniverse); assertTaskType(subTasksByPosition.get(position++), TaskType.UpdateConsistencyCheck); + assertTaskType(subTasksByPosition.get(position++), TaskType.FreezeUniverse); if (taskParams.enableNodeToNodeEncrypt || taskParams.enableClientToNodeEncrypt) { // Cert update tasks will be non rolling List certUpdateTasks = subTasksByPosition.get(position++); diff --git a/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/upgrade/VMImageUpgradeTest.java b/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/upgrade/VMImageUpgradeTest.java index 9bfcacf99eb4..d48d16ae12d0 100644 --- a/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/upgrade/VMImageUpgradeTest.java +++ b/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/upgrade/VMImageUpgradeTest.java @@ -211,8 +211,8 @@ public void testVMImageUpgrade() { int position = 0; assertTaskType(subTasksByPosition.get(position++), TaskType.CheckNodesAreSafeToTakeDown); - assertTaskType(subTasksByPosition.get(position++), TaskType.FreezeUniverse); assertTaskType(subTasksByPosition.get(position++), TaskType.UpdateConsistencyCheck); + assertTaskType(subTasksByPosition.get(position++), TaskType.FreezeUniverse); List createRootVolumeTasks = 
subTasksByPosition.get(position++); assertTaskType(createRootVolumeTasks, TaskType.CreateRootVolumes); assertEquals(expectedRootVolumeCreationTasks, createRootVolumeTasks.size()); @@ -400,8 +400,8 @@ public void testVMImageUpgradeWithImageBundle() { int position = 0; assertTaskType(subTasksByPosition.get(position++), TaskType.CheckNodesAreSafeToTakeDown); - assertTaskType(subTasksByPosition.get(position++), TaskType.FreezeUniverse); assertTaskType(subTasksByPosition.get(position++), TaskType.UpdateConsistencyCheck); + assertTaskType(subTasksByPosition.get(position++), TaskType.FreezeUniverse); List createRootVolumeTasks = subTasksByPosition.get(position++); assertTaskType(createRootVolumeTasks, TaskType.CreateRootVolumes); assertEquals(expectedRootVolumeCreationTasks, createRootVolumeTasks.size()); From 0c410237ee40e16c62b651047840f1745e916e97 Mon Sep 17 00:00:00 2001 From: Kai Franz Date: Mon, 16 Sep 2024 14:18:52 -0700 Subject: [PATCH 61/75] [#23956] YSQL: Fix org.yb.pgsql.TestYsqlMetrics#testMetricRows Summary: D37636 adds table-level metrics to the `/statements` endpoint. This means that many metrics have the same name but a different `table_name` field. The parser in `Metrics.java` expects there to be only one metric with a given name, so if it sees the same name more than once, it overwrites the previous value for that metric. This revision modifies the metrics parser to skip over table-level metrics. 
Jira: DB-12855 Test Plan: ``` ./yb_build.sh --java-test 'org.yb.pgsql.TestYsqlMetrics#testMetricRows' ``` Reviewers: myang Reviewed By: myang Differential Revision: https://phorge.dev.yugabyte.com/D38103 --- java/yb-client/src/test/java/org/yb/minicluster/Metrics.java | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/java/yb-client/src/test/java/org/yb/minicluster/Metrics.java b/java/yb-client/src/test/java/org/yb/minicluster/Metrics.java index 2e72a7392a8f..3670791d3a1f 100644 --- a/java/yb-client/src/test/java/org/yb/minicluster/Metrics.java +++ b/java/yb-client/src/test/java/org/yb/minicluster/Metrics.java @@ -258,6 +258,10 @@ private void readMetrics(JsonObject obj) { map = new HashMap<>(); for (JsonElement subelem : obj.getAsJsonArray("metrics")) { JsonObject metric = subelem.getAsJsonObject(); + if (metric.has("table_name")) { + // Skip table-specific metrics. + continue; + } if (metric.has("value")) { JsonPrimitive value = metric.get("value").getAsJsonPrimitive(); if (value.isBoolean()) { From 388e0457f202af7545bedbf0b3a4f687a69cd6b4 Mon Sep 17 00:00:00 2001 From: Yamen Haddad <22749018+yamen-haddad@users.noreply.github.com> Date: Tue, 17 Sep 2024 00:14:15 +0000 Subject: [PATCH 62/75] [#21625,#21627] Docdb: Clear stale meta-cache entries at the end of clone Summary: As part of the clone workflow, we repartition all the tables of the target database that has been created by executing the dump script. This means removing the old tablets and creating new ones during the import snapshot phase. However, we saw some cases where the old tablets are cached in the meta-cache of the tserver that executed the schema creation script. Other tservers can also have these stale metacache entries. For example, as part of executing `CREATE INDEX`, we send `BACKFILL INDEX` queries to the tservers that host the base table tablets' leaders which populates the cache with old tablets. The stale meta-cache entries are used later to execute the queries that arrive at tservers. 
However, the stale tablets are deleted in the import snapshot phase which leads to the following error: ``` d3=# select count(*) from t2 where age<18; ERROR: LookupByIdRpc(tablet: 89b4445772d2415aa1702a77031b7d74, num_attempts: 2) failed: Tablet deleted: Not serving tablet deleted upon request at 2024-08-01 15:39:31 UTC ``` It is worth mentioning that we encounter this issue only in the first query that is executed in the tserver with stale metacache. If we retry the same query another time, it will work fine as the meta-cache has invalidated the stale entry. We saw this issue only in the colocated database when there is an index. This is because as part of executing `CREATE INDEX` command, we ask for the TableLocations of the parent colocated tablet. The diff fixes the problem by introducing a new tserver RPC `ClearMetaCacheEntriesForNamespace` which clears all the metacache entries (tables and tablets) related to the clone database. This RPC is sent to all tservers as part of clone workflow. More specifically, clearing the metacache happens at the final step of clone i.e. after successfully restoring the snapshot on the clone database but before enabling user connections to the database. User connections to the clone database are enabled after successfully clearing the stale metacache entries of all tservers. **Upgrade/Rollback safety** The diff adds a new RPC `ClearMetacache` that is only used in instant database cloning workflow currently. The clone feature is protected by the preview flag: `enable_db_clone`. Jira: DB-10520, DB-10522 Test Plan: ./yb_build.sh fastdebug --cxx-test integration-tests_minicluster-snapshot-test --gtest_filter Colocation/PgCloneTestWithColocatedDBParam.CloneAfterDropIndex/1 Also tested manually that the ClearMetacache is clearing only the entries that belong to one specific database using the end point: `:9000/api/v1/meta-cache` which shows the set of tablets in the metacache. 
I checked that the tablet `0000000000` is not cleared after executing the RPC as intended. Reviewers: asrivastava, mlillibridge Reviewed By: asrivastava Subscribers: yguan, ybase, slingam Differential Revision: https://phorge.dev.yugabyte.com/D37353 --- src/yb/client/client.cc | 4 ++ src/yb/client/client.h | 2 + src/yb/client/meta_cache.cc | 42 ++++++++++++++- src/yb/client/meta_cache.h | 2 + .../minicluster-snapshot-test.cc | 42 +++++++++++++++ src/yb/master/async_rpc_tasks.cc | 35 ++++++++++++ src/yb/master/async_rpc_tasks.h | 27 ++++++++++ src/yb/master/clone/clone_state_entity.cc | 8 +++ src/yb/master/clone/clone_state_entity.h | 9 ++++ .../master/clone/clone_state_manager-test.cc | 6 +++ src/yb/master/clone/clone_state_manager.cc | 54 ++++++++++++++++--- src/yb/master/clone/clone_state_manager.h | 3 ++ src/yb/master/clone/external_functions.h | 6 +++ src/yb/master/master_tserver.cc | 4 ++ src/yb/master/master_tserver.h | 2 + src/yb/server/monitored_task.h | 1 + src/yb/tserver/tablet_server.cc | 4 ++ src/yb/tserver/tablet_server.h | 2 + src/yb/tserver/tablet_server_interface.h | 2 + src/yb/tserver/tablet_service.cc | 15 ++++++ src/yb/tserver/tablet_service.h | 4 ++ src/yb/tserver/tserver.proto | 9 ++++ src/yb/tserver/tserver_service.proto | 4 ++ 23 files changed, 280 insertions(+), 7 deletions(-) diff --git a/src/yb/client/client.cc b/src/yb/client/client.cc index 765e5fc3aa8f..067b6b3aa0d3 100644 --- a/src/yb/client/client.cc +++ b/src/yb/client/client.cc @@ -2993,6 +2993,10 @@ void YBClient::ClearAllMetaCachesOnServer() { data_->meta_cache_->ClearAll(); } +Status YBClient::ClearMetacache(const std::string& namespace_id) { + return data_->meta_cache_->ClearCacheEntries(namespace_id); +} + bool YBClient::RefreshTabletInfoWithConsensusInfo( const tserver::TabletConsensusInfoPB& newly_received_info) { auto status = data_->meta_cache_->RefreshTabletInfoWithConsensusInfo(newly_received_info); diff --git a/src/yb/client/client.h b/src/yb/client/client.h index 
fc2f6284976a..75a449b6b3a2 100644 --- a/src/yb/client/client.h +++ b/src/yb/client/client.h @@ -1024,6 +1024,8 @@ class YBClient { void ClearAllMetaCachesOnServer(); + Status ClearMetacache(const std::string& namespace_id); + // Uses the TabletConsensusInfo piggybacked from a response to // refresh a RemoteTablet in metacache. Returns true if the // RemoteTablet was indeed refreshed, false otherwise. diff --git a/src/yb/client/meta_cache.cc b/src/yb/client/meta_cache.cc index b00e899ad3c0..75ac99fcde31 100644 --- a/src/yb/client/meta_cache.cc +++ b/src/yb/client/meta_cache.cc @@ -53,14 +53,17 @@ #include "yb/client/table.h" #include "yb/client/yb_table_name.h" +#include "yb/common/colocated_util.h" #include "yb/common/common_consensus_util.h" #include "yb/common/wire_protocol.h" +#include "yb/common/ysql_utils.h" #include "yb/gutil/map-util.h" #include "yb/gutil/ref_counted.h" #include "yb/gutil/strings/substitute.h" #include "yb/master/master_client.proxy.h" +#include "yb/master/sys_catalog_constants.h" #include "yb/rpc/rpc_fwd.h" @@ -2440,13 +2443,50 @@ std::future> MetaCache::LookupTabletByKeyFutur void MetaCache::ClearAll() { std::lock_guard lock(mutex_); - ts_cache_.clear(); tables_.clear(); tablets_by_id_.clear(); tablet_lookups_by_id_.clear(); deleted_tablets_.clear(); } +Status MetaCache::ClearCacheEntries(const std::string& namespace_id) { + std::lock_guard lock(mutex_); + LOG(INFO) << Format("Clearing MetaCache entries for namespace: $0", namespace_id); + // Stores the tables and tablets that belong to the namespace namespace_id + std::set db_tables_ids; + std::set db_tablets_ids; + for (const auto& [table_id, table_data] : tables_) { + // Escape sys catalog and parent table ids as they don't conform to a typical ysql table id + if (table_id == master::kSysCatalogTableId) { + continue; + } else if (IsColocationParentTableId(table_id)) { + db_tables_ids.insert(table_id); + continue; + } else if (VERIFY_RESULT(GetNamespaceIdFromYsqlTableId(table_id)) == 
namespace_id) { + VLOG(5) << Format( + "Marking table: $0 for clearing from metacache as it is part of namespace $1: ", table_id, + namespace_id); + for (const auto& [_, remote_tablet] : table_data.tablets_by_partition) { + // Do not clear the sys.catalog tablet + if (remote_tablet->tablet_id() != master::kSysCatalogTabletId) { + db_tablets_ids.insert(remote_tablet->tablet_id()); + } + } + db_tables_ids.insert(table_id); + } + } + for (const auto& table_id : db_tables_ids) { + VLOG(4) << Format("Erasing table: $0 from metacache", table_id); + tables_.erase(table_id); + } + for (const auto& tablet_id : db_tablets_ids) { + VLOG(4) << Format("Erasing tablet: $0 from metacache", tablet_id); + tablets_by_id_.erase(tablet_id); + tablet_lookups_by_id_.erase(tablet_id); + } + return Status::OK(); +} + LookupDataGroup::~LookupDataGroup() { std::vector leftovers; while (auto* d = lookups.Pop()) { diff --git a/src/yb/client/meta_cache.h b/src/yb/client/meta_cache.h index 23ef9373a4fb..5e5d5d1facdc 100644 --- a/src/yb/client/meta_cache.h +++ b/src/yb/client/meta_cache.h @@ -627,6 +627,8 @@ class MetaCache : public RefCountedThreadSafe { void ClearAll(); + Status ClearCacheEntries(const std::string& namespace_id); + // TabletConsensusInfo is piggybacked from the response of a TServer. // Returns Status::OK() if and only if the meta-cache was updated. 
Status RefreshTabletInfoWithConsensusInfo( diff --git a/src/yb/integration-tests/minicluster-snapshot-test.cc b/src/yb/integration-tests/minicluster-snapshot-test.cc index e0aa72d10923..4de99b12cff9 100644 --- a/src/yb/integration-tests/minicluster-snapshot-test.cc +++ b/src/yb/integration-tests/minicluster-snapshot-test.cc @@ -660,6 +660,7 @@ class PgCloneTest : public PostgresMiniClusterTest { std::unique_ptr source_conn_; const std::string kSourceNamespaceName = "testdb"; + const std::string kSourceTableName = "t1"; const std::string kTargetNamespaceName1 = "testdb_clone1"; const std::string kTargetNamespaceName2 = "testdb_clone2"; const MonoDelta kTimeout = MonoDelta::FromSeconds(30); @@ -788,6 +789,47 @@ TEST_P(PgCloneTestWithColocatedDBParam, YB_DISABLE_TEST_IN_SANITIZERS(CloneAfter ASSERT_EQ(row, kRows[0]); } +// The test is disabled in Sanitizers as ysql_dump fails in ASAN builds due to memory leaks +// inherited from pg_dump. +TEST_P(PgCloneTestWithColocatedDBParam, YB_DISABLE_TEST_IN_SANITIZERS(CloneAfterDropIndex)) { + // Clone to a time before a drop index and check that the index exists with correct data. + // 1. Create a table and load some data. + // 2. Create an index on the table. + // 3. Mark time t. + // 4. Drop index. + // 5. Clone the database as of time t. + // 6. Check the index exists in the clone with the correct data. + const std::vector> kRows = {{1, 10}}; + const std::string kIndexName = "t1_v_idx"; + + ASSERT_OK(source_conn_->ExecuteFormat( + "INSERT INTO t1 VALUES ($0, $1)", std::get<0>(kRows[0]), std::get<1>(kRows[0]))); + + ASSERT_OK(source_conn_->ExecuteFormat("CREATE INDEX $0 ON t1(value)", kIndexName)); + + // Scans should use the index now. 
+ auto is_index_scan = ASSERT_RESULT( + source_conn_->HasIndexScan(Format("SELECT * FROM t1 where value=$0", std::get<1>(kRows[0])))); + LOG(INFO) << "Scans uses index scan " << is_index_scan; + ASSERT_TRUE(is_index_scan); + + auto clone_to_time = ASSERT_RESULT(GetCurrentTime()).ToInt64(); + ASSERT_OK(source_conn_->ExecuteFormat("DROP INDEX $0", kIndexName)); + + ASSERT_OK(source_conn_->ExecuteFormat( + "CREATE DATABASE $0 TEMPLATE $1 AS OF $2", kTargetNamespaceName1, kSourceNamespaceName, + clone_to_time)); + + // Verify table t1 exists in the clone database and that the index is used to fetch the data. + auto target_conn = ASSERT_RESULT(ConnectToDB(kTargetNamespaceName1)); + is_index_scan = ASSERT_RESULT( + target_conn.HasIndexScan(Format("SELECT * FROM t1 WHERE value=$0", std::get<1>(kRows[0])))); + ASSERT_TRUE(is_index_scan); + auto row = ASSERT_RESULT((target_conn.FetchRow( + Format("SELECT * FROM t1 WHERE value=$0", std::get<1>(kRows[0]))))); + ASSERT_EQ(row, kRows[0]); +} + TEST_F(PgCloneTest, YB_DISABLE_TEST_IN_SANITIZERS(TabletSplitting)) { const int kNumRows = 1000; diff --git a/src/yb/master/async_rpc_tasks.cc b/src/yb/master/async_rpc_tasks.cc index 10c514323d0c..c40e523d1c2e 100644 --- a/src/yb/master/async_rpc_tasks.cc +++ b/src/yb/master/async_rpc_tasks.cc @@ -2033,6 +2033,41 @@ bool AsyncClonePgSchema::SendRequest(int attempt) { MonoTime AsyncClonePgSchema::ComputeDeadline() { return deadline_; } +// ============================================================================ +// Class AsyncClearMetacache. 
+// ============================================================================ +AsyncClearMetacache::AsyncClearMetacache( + Master* master, ThreadPool* callback_pool, const std::string& permanent_uuid, + const std::string& namespace_id, ClearMetacacheCallbackType callback) + : RetrySpecificTSRpcTask( + master, callback_pool, permanent_uuid, /* async_task_throttler */ nullptr), + namespace_id(namespace_id), + callback_(callback) {} + +std::string AsyncClearMetacache::description() const { return "Async ClearMetacache RPC"; } + +void AsyncClearMetacache::HandleResponse(int attempt) { + Status resp_status = Status::OK(); + if (resp_.has_error()) { + resp_status = StatusFromPB(resp_.error().status()); + LOG(WARNING) << "Clear Metacache entries for namespace " << namespace_id + << " failed: " << resp_status; + TransitionToFailedState(state(), resp_status); + } else { + TransitionToCompleteState(); + } + WARN_NOT_OK(callback_(), "Failed to execute the callback of AsyncClearMetacache"); +} + +bool AsyncClearMetacache::SendRequest(int attempt) { + tserver::ClearMetacacheRequestPB req; + req.set_namespace_id(namespace_id); + ts_proxy_->ClearMetacacheAsync(req, &resp_, &rpc_, BindRpcCallback()); + VLOG_WITH_PREFIX(1) << Format( + "Sent clear metacache entries request of namespace: $0 to $1", namespace_id, tablet_id()); + return true; +} + // ============================================================================ // Class AsyncEnableDbConns. 
// ============================================================================ diff --git a/src/yb/master/async_rpc_tasks.h b/src/yb/master/async_rpc_tasks.h index aa2127fa8f9f..584cae449076 100644 --- a/src/yb/master/async_rpc_tasks.h +++ b/src/yb/master/async_rpc_tasks.h @@ -1096,6 +1096,33 @@ class AsyncClonePgSchema : public RetrySpecificTSRpcTask { ClonePgSchemaCallbackType callback_; }; +class AsyncClearMetacache : public RetrySpecificTSRpcTask { + public: + using ClearMetacacheCallbackType = std::function; + AsyncClearMetacache( + Master* master, ThreadPool* callback_pool, const std::string& permanent_uuid, + const std::string& namespace_id, ClearMetacacheCallbackType callback); + + server::MonitoredTaskType type() const override { + return server::MonitoredTaskType::kClearMetaCache; + } + + std::string type_name() const override { return "Clear all meta-caches of a tserver"; } + + std::string description() const override; + + protected: + void HandleResponse(int attempt) override; + bool SendRequest(int attempt) override; + // Not associated with a tablet. 
+ TabletId tablet_id() const override { return TabletId(); } + + private: + std::string namespace_id; + tserver::ClearMetacacheResponsePB resp_; + ClearMetacacheCallbackType callback_; +}; + class AsyncEnableDbConns : public RetrySpecificTSRpcTask { public: using EnableDbConnsCallbackType = std::function; diff --git a/src/yb/master/clone/clone_state_entity.cc b/src/yb/master/clone/clone_state_entity.cc index fe5ea7713ce0..44279301d988 100644 --- a/src/yb/master/clone/clone_state_entity.cc +++ b/src/yb/master/clone/clone_state_entity.cc @@ -88,4 +88,12 @@ void CloneStateInfo::SetRestorationId(const TxnSnapshotRestorationId& restoratio restoration_id_ = restoration_id; } +std::shared_ptr CloneStateInfo::NumTserversWithStaleMetacache() { + return num_tservers_with_stale_metacache; +} + +void CloneStateInfo::SetNumTserversWithStaleMetacache(uint64_t count) { + num_tservers_with_stale_metacache = std::make_shared(count); +} + } // namespace yb::master diff --git a/src/yb/master/clone/clone_state_entity.h b/src/yb/master/clone/clone_state_entity.h index f91c88676464..b65c6280e737 100644 --- a/src/yb/master/clone/clone_state_entity.h +++ b/src/yb/master/clone/clone_state_entity.h @@ -19,6 +19,8 @@ #include "yb/master/catalog_entity_info.pb.h" #include "yb/master/sys_catalog.h" +#include "yb/util/countdown_latch.h" + namespace yb::master { struct PersistentCloneStateInfo : public Persistent {}; @@ -69,6 +71,9 @@ class CloneStateInfo : public MetadataCowWrapper { const TxnSnapshotRestorationId& RestorationId(); void SetRestorationId(const TxnSnapshotRestorationId& restoration_id); + std::shared_ptr NumTserversWithStaleMetacache(); + void SetNumTserversWithStaleMetacache(uint64_t count); + private: // The ID field is used in the sys_catalog table. const std::string clone_request_id_; @@ -84,6 +89,10 @@ class CloneStateInfo : public MetadataCowWrapper { // This is set before the clone state is set to RESTORING. 
TxnSnapshotRestorationId restoration_id_ GUARDED_BY(mutex_) = TxnSnapshotRestorationId::Nil(); + // The number of tservers that a Clear Metacache rpc has been sent to but didn't respond with + // success. Only enable connections to target DB after all tservers cleared thier metacache. + std::shared_ptr num_tservers_with_stale_metacache; + std::mutex mutex_; DISALLOW_COPY_AND_ASSIGN(CloneStateInfo); diff --git a/src/yb/master/clone/clone_state_manager-test.cc b/src/yb/master/clone/clone_state_manager-test.cc index c71e1c41cdbb..a9e245eb864c 100644 --- a/src/yb/master/clone/clone_state_manager-test.cc +++ b/src/yb/master/clone/clone_state_manager-test.cc @@ -111,6 +111,11 @@ class CloneStateManagerTest : public YBTest { const std::string& target_db_name, const std::string& source_owner, const std::string& target_owner, HybridTime restore_ht, AsyncClonePgSchema::ClonePgSchemaCallbackType callback, MonoTime deadline), (override)); + MOCK_METHOD( + Status, ScheduleClearMetaCacheTasks, + (const TSDescriptorVector& tservers, const std::string& namespace_id, + AsyncClearMetacache::ClearMetacacheCallbackType callback), + (override)); MOCK_METHOD( Status, ScheduleEnableDbConnectionsTask, (const std::string& permanent_uuid, const std::string& target_db_name, @@ -146,6 +151,7 @@ class CloneStateManagerTest : public YBTest { CoarseTimePoint deadline), (override)); MOCK_METHOD(Result, PickTserver, (), (override)); + MOCK_METHOD(TSDescriptorVector, GetTservers, (), (override)); }; private: diff --git a/src/yb/master/clone/clone_state_manager.cc b/src/yb/master/clone/clone_state_manager.cc index 454fbf151b0d..430464783ecf 100644 --- a/src/yb/master/clone/clone_state_manager.cc +++ b/src/yb/master/clone/clone_state_manager.cc @@ -122,6 +122,20 @@ class CloneStateManagerExternalFunctions : public CloneStateManagerExternalFunct return catalog_manager_->ScheduleTask(task); } + Status ScheduleClearMetaCacheTasks( + const TSDescriptorVector& tservers, const std::string& namespace_id, + 
AsyncClearMetacache::ClearMetacacheCallbackType callback) override { + for (const auto& ts : tservers) { + auto task = std::make_shared( + master_, catalog_manager_->AsyncTaskPool(), ts->permanent_uuid(), namespace_id, callback); + LOG(INFO) << Format( + "Scheduling clear metacache entries task for namespace: $0 and tserver with UUID: $1", + namespace_id, ts->permanent_uuid()); + RETURN_NOT_OK(catalog_manager_->ScheduleTask(task)); + } + return Status::OK(); + } + Status ScheduleEnableDbConnectionsTask( const TabletServerId& ts_uuid, const std::string& target_db_name, AsyncEnableDbConns::EnableDbConnsCallbackType callback) override { @@ -164,6 +178,10 @@ class CloneStateManagerExternalFunctions : public CloneStateManagerExternalFunct return tservers[0]; } + TSDescriptorVector GetTservers() override { + return catalog_manager_->GetAllLiveNotBlacklistedTServers(); + } + // Sys catalog. Status Upsert(int64_t leader_term, const CloneStateInfoPtr& clone_state) override { return sys_catalog_->Upsert(leader_term, clone_state); @@ -383,11 +401,10 @@ Status CloneStateManager::ClonePgSchemaObjects( // Pick one of the live tservers to send ysql_dump and ysqlsh requests to. 
auto ts = VERIFY_RESULT(external_funcs_->PickTserver()); - auto ts_permanent_uuid = ts->permanent_uuid(); // Deadline passed to the ClonePgSchemaTask (including rpc time and callback execution deadline) auto deadline = MonoTime::Now() + FLAGS_ysql_clone_pg_schema_rpc_timeout_ms * 1ms; RETURN_NOT_OK(external_funcs_->ScheduleClonePgSchemaTask( - ts_permanent_uuid, source_db_name, target_db_name, pg_source_owner, pg_target_owner, + ts->permanent_uuid(), source_db_name, target_db_name, pg_source_owner, pg_target_owner, HybridTime(clone_state->LockForRead()->pb.restore_time()), MakeDoneClonePgSchemaCallback( clone_state, snapshot_schedule_id, target_db_name, ToCoarse(deadline)), @@ -625,6 +642,28 @@ Status CloneStateManager::HandleCreatingState(const CloneStateInfoPtr& clone_sta return Status::OK(); } +Status CloneStateManager::ClearMetaCaches(const CloneStateInfoPtr& clone_state) { + auto callback = [this, clone_state]() -> Status { + auto num_tservers_with_stale_metacache = clone_state->NumTserversWithStaleMetacache(); + num_tservers_with_stale_metacache->CountDown(); + if (num_tservers_with_stale_metacache->count() == 0) { + RETURN_NOT_OK(EnableDbConnections(clone_state)); + } + return Status::OK(); + }; + NamespaceIdentifierPB target_namespace_identifier; + target_namespace_identifier.set_name(clone_state->LockForRead()->pb.target_namespace_name()); + target_namespace_identifier.set_database_type(YQL_DATABASE_PGSQL); + auto target_namespace_id = + VERIFY_RESULT(external_funcs_->FindNamespace(target_namespace_identifier))->id(); + + TSDescriptorVector running_tservers = external_funcs_->GetTservers(); + clone_state->SetNumTserversWithStaleMetacache(running_tservers.size()); + RETURN_NOT_OK(external_funcs_->ScheduleClearMetaCacheTasks( + running_tservers, target_namespace_id, callback)); + return Status::OK(); +} + Status CloneStateManager::EnableDbConnections(const CloneStateInfoPtr& clone_state) { auto callback = [this, clone_state](const Status& 
enable_db_conns_status) -> Status { @@ -634,6 +673,8 @@ Status CloneStateManager::EnableDbConnections(const CloneStateInfoPtr& clone_sta SCHECK_EQ(lock->pb.aggregate_state(), SysCloneStatePB::RESTORED, IllegalState, "Expected clone to be in restored state"); lock.mutable_data()->pb.set_aggregate_state(SysCloneStatePB::COMPLETE); + LOG(INFO) << Format( + "Marking clone of namespace: $0 as complete", lock->pb.source_namespace_id()); auto status = external_funcs_->Upsert(clone_state->Epoch().leader_term, clone_state); if (status.ok()) { lock.Commit(); @@ -644,11 +685,12 @@ Status CloneStateManager::EnableDbConnections(const CloneStateInfoPtr& clone_sta } return Status::OK(); }; - auto ts = VERIFY_RESULT(external_funcs_->PickTserver()); - auto ts_permanent_uuid = ts->permanent_uuid(); + LOG(INFO) << Format( + "Scheduling enable DB Connections Task for database:$0 ", + clone_state->LockForRead()->pb.target_namespace_name()); RETURN_NOT_OK(external_funcs_->ScheduleEnableDbConnectionsTask( - ts_permanent_uuid, clone_state->LockForRead()->pb.target_namespace_name(), callback)); + ts->permanent_uuid(), clone_state->LockForRead()->pb.target_namespace_name(), callback)); return Status::OK(); } @@ -670,7 +712,7 @@ Status CloneStateManager::HandleRestoringState(const CloneStateInfoPtr& clone_st lock.mutable_data()->pb.set_aggregate_state(SysCloneStatePB::RESTORED); RETURN_NOT_OK(external_funcs_->Upsert(clone_state->Epoch().leader_term, clone_state)); lock.Commit(); - return EnableDbConnections(clone_state); + return ClearMetaCaches(clone_state); } else { lock.mutable_data()->pb.set_aggregate_state(SysCloneStatePB::COMPLETE); RETURN_NOT_OK(external_funcs_->Upsert(clone_state->Epoch().leader_term, clone_state)); diff --git a/src/yb/master/clone/clone_state_manager.h b/src/yb/master/clone/clone_state_manager.h index 45dcf7822807..cdae9cd57f79 100644 --- a/src/yb/master/clone/clone_state_manager.h +++ b/src/yb/master/clone/clone_state_manager.h @@ -110,6 +110,9 @@ class 
CloneStateManager { const std::string& target_namespace_name, CoarseTimePoint deadline); + // Clear the metacaches from stale entries for all running tservers as they have stale tablets. + Status ClearMetaCaches(const CloneStateInfoPtr& clone_state); + Status EnableDbConnections(const CloneStateInfoPtr& clone_state); Status HandleCreatingState(const CloneStateInfoPtr& clone_state); diff --git a/src/yb/master/clone/external_functions.h b/src/yb/master/clone/external_functions.h index abf1bf93bcbb..14cb5288318f 100644 --- a/src/yb/master/clone/external_functions.h +++ b/src/yb/master/clone/external_functions.h @@ -61,6 +61,10 @@ class CloneStateManagerExternalFunctionsBase { AsyncClonePgSchema::ClonePgSchemaCallbackType callback, MonoTime deadline) = 0; + virtual Status ScheduleClearMetaCacheTasks( + const TSDescriptorVector& tservers, const std::string& namespace_id, + AsyncClearMetacache::ClearMetacacheCallbackType callback) = 0; + virtual Status ScheduleEnableDbConnectionsTask( const std::string& permanent_uuid, const std::string& target_db_name, AsyncEnableDbConns::EnableDbConnsCallbackType callback) = 0; @@ -82,6 +86,8 @@ class CloneStateManagerExternalFunctionsBase { virtual Result PickTserver() = 0; + virtual TSDescriptorVector GetTservers() = 0; + // Sys catalog. 
virtual Status Upsert(int64_t leader_term, const CloneStateInfoPtr&) = 0; virtual Status Upsert(int64_t leader_term, const CloneStateInfoPtr&, const NamespaceInfoPtr&) = 0; diff --git a/src/yb/master/master_tserver.cc b/src/yb/master/master_tserver.cc index 341a284cbfe2..edae2cf8f8f0 100644 --- a/src/yb/master/master_tserver.cc +++ b/src/yb/master/master_tserver.cc @@ -204,6 +204,10 @@ void MasterTabletServer::ClearAllMetaCachesOnServer() { client()->ClearAllMetaCachesOnServer(); } +Status MasterTabletServer::ClearMetacache(const std::string& namespace_id) { + return client()->ClearMetacache(namespace_id); +} + Status MasterTabletServer::YCQLStatementStats(const tserver::PgYCQLStatementStatsRequestPB& req, tserver::PgYCQLStatementStatsResponsePB* resp) const { LOG(FATAL) << "Unexpected call of YCQLStatementStats()"; diff --git a/src/yb/master/master_tserver.h b/src/yb/master/master_tserver.h index 51d882383cf7..19845b59c472 100644 --- a/src/yb/master/master_tserver.h +++ b/src/yb/master/master_tserver.h @@ -100,6 +100,8 @@ class MasterTabletServer : public tserver::TabletServerIf, void ClearAllMetaCachesOnServer() override; + Status ClearMetacache(const std::string& namespace_id) override; + Status YCQLStatementStats(const tserver::PgYCQLStatementStatsRequestPB& req, tserver::PgYCQLStatementStatsResponsePB* resp) const override; diff --git a/src/yb/server/monitored_task.h b/src/yb/server/monitored_task.h index 63caa58f11fd..f272c4e3b3be 100644 --- a/src/yb/server/monitored_task.h +++ b/src/yb/server/monitored_task.h @@ -66,6 +66,7 @@ YB_DEFINE_ENUM(MonitoredTaskType, (kBackfillTable) (kBackfillTabletChunk) (kChangeConfig) + (kClearMetaCache) (kClonePgSchema) (kCloneTablet) (kCreateReplica) diff --git a/src/yb/tserver/tablet_server.cc b/src/yb/tserver/tablet_server.cc index c53b0933e3f1..10c5ddf901e9 100644 --- a/src/yb/tserver/tablet_server.cc +++ b/src/yb/tserver/tablet_server.cc @@ -1514,6 +1514,10 @@ void TabletServer::ClearAllMetaCachesOnServer() { 
client()->ClearAllMetaCachesOnServer(); } +Status TabletServer::ClearMetacache(const std::string& namespace_id) { + return client()->ClearMetacache(namespace_id); +} + Result> TabletServer::GetLocalTabletsMetadata() const { std::vector result; auto peers = tablet_manager_.get()->GetTabletPeers(); diff --git a/src/yb/tserver/tablet_server.h b/src/yb/tserver/tablet_server.h index 8a9d4b504901..41fa6d288b28 100644 --- a/src/yb/tserver/tablet_server.h +++ b/src/yb/tserver/tablet_server.h @@ -372,6 +372,8 @@ class TabletServer : public DbServerBase, public TabletServerIf { void ClearAllMetaCachesOnServer() override; + Status ClearMetacache(const std::string& namespace_id) override; + Result> GetLocalTabletsMetadata() const override; void TEST_SetIsCronLeader(bool is_cron_leader); diff --git a/src/yb/tserver/tablet_server_interface.h b/src/yb/tserver/tablet_server_interface.h index 323badf6a827..418dfb71ef42 100644 --- a/src/yb/tserver/tablet_server_interface.h +++ b/src/yb/tserver/tablet_server_interface.h @@ -108,6 +108,8 @@ class TabletServerIf : public LocalTabletServer { virtual void ClearAllMetaCachesOnServer() = 0; + virtual Status ClearMetacache(const std::string& namespace_id) = 0; + virtual Status YCQLStatementStats(const tserver::PgYCQLStatementStatsRequestPB& req, tserver::PgYCQLStatementStatsResponsePB* resp) const = 0; diff --git a/src/yb/tserver/tablet_service.cc b/src/yb/tserver/tablet_service.cc index cabd5cad1a0a..cad64566a70e 100644 --- a/src/yb/tserver/tablet_service.cc +++ b/src/yb/tserver/tablet_service.cc @@ -3341,6 +3341,21 @@ void TabletServiceImpl::ClearAllMetaCachesOnServer( context.RespondSuccess(); } +void TabletServiceImpl::ClearMetacache( + const ClearMetacacheRequestPB* req, ClearMetacacheResponsePB* resp, rpc::RpcContext context) { + if (!req->has_namespace_id()) { + SetupErrorAndRespond( + resp->mutable_error(), STATUS(InvalidArgument, "namespace_id is not specified"), &context); + return; + } + auto s = 
server_->ClearMetacache(req->namespace_id()); + if (!s.ok()) { + SetupErrorAndRespond(resp->mutable_error(), s, &context); + } else { + context.RespondSuccess(); + } +} + void TabletServiceImpl::AcquireObjectLocks( const AcquireObjectLockRequestPB* req, AcquireObjectLockResponsePB* resp, rpc::RpcContext context) { diff --git a/src/yb/tserver/tablet_service.h b/src/yb/tserver/tablet_service.h index 2a570b99c5d2..286c84389686 100644 --- a/src/yb/tserver/tablet_service.h +++ b/src/yb/tserver/tablet_service.h @@ -215,6 +215,10 @@ class TabletServiceImpl : public TabletServerServiceIf, public ReadTabletProvide const ClearAllMetaCachesOnServerRequestPB* req, ClearAllMetaCachesOnServerResponsePB* resp, rpc::RpcContext context) override; + void ClearMetacache( + const ClearMetacacheRequestPB* req, ClearMetacacheResponsePB* resp, + rpc::RpcContext context) override; + void AcquireObjectLocks( const AcquireObjectLockRequestPB* req, AcquireObjectLockResponsePB* resp, rpc::RpcContext context) override; diff --git a/src/yb/tserver/tserver.proto b/src/yb/tserver/tserver.proto index 5b847c6b37e3..82edce3abf77 100644 --- a/src/yb/tserver/tserver.proto +++ b/src/yb/tserver/tserver.proto @@ -388,6 +388,15 @@ message ClearAllMetaCachesOnServerRequestPB {} message ClearAllMetaCachesOnServerResponsePB {} +message ClearMetacacheRequestPB { + // Only Ysql namespace can be cleared from metacache. 
+ optional string namespace_id = 1; +} + +message ClearMetacacheResponsePB { + optional TabletServerErrorPB error = 1; +} + message ClearUniverseUuidRequestPB { } diff --git a/src/yb/tserver/tserver_service.proto b/src/yb/tserver/tserver_service.proto index 9dbcaab8d4e8..19a53dc6aa0a 100644 --- a/src/yb/tserver/tserver_service.proto +++ b/src/yb/tserver/tserver_service.proto @@ -126,6 +126,10 @@ service TabletServerService { rpc ClearAllMetaCachesOnServer(ClearAllMetaCachesOnServerRequestPB) returns (ClearAllMetaCachesOnServerResponsePB); + // Clear metacache entries (tables and tablets) that belong to the provided namespace. + rpc ClearMetacache(ClearMetacacheRequestPB) + returns (ClearMetacacheResponsePB); + rpc ClearUniverseUuid(ClearUniverseUuidRequestPB) returns (ClearUniverseUuidResponsePB); rpc AcquireObjectLocks(AcquireObjectLockRequestPB) From 5523770fcd6e4a1a45b7714203992a17efcde983 Mon Sep 17 00:00:00 2001 From: Hal Takahara <8877300+mtakahar@users.noreply.github.com> Date: Tue, 10 Sep 2024 11:59:37 -0400 Subject: [PATCH 63/75] [#23547] YSQL: fix pg_hint_plan crash with pg_hint_plan.enable_hint_table enabled Summary: When hint_table is enabled, pg_hint_plan looks up the hint_table using the the current query string as the key, which is done in the extension callbacks: `post_parse_analyze_hook` and `planner_hook`. The query string is available in the `ParseState` when it is in `post_parse_analyze_hook` as it is passed down as an argument. On the other hand, `planner_hook` in **pg11** does not have access to the current query string, and `get_current_hint_string` was assuming that it's processing the top-level query. However, that is not always the case, and it would crash while normalizing the query if the source location of some `Const` nodes in the query tree is beyond the end of the query string. 
The solution is not to use the top-level query string if `ParseState` is unavailable and `stmt_location` and `stmt_len` in the Query node indicate that it may not be the top-level query but a subquery fragment or a query in a stored procedure. **pg15 note: the change to `pg_hint_plan.c` is not applicable to the pg15 variant because the planner_hook receives the correct query string along with the parse tree. No `get_current_hint_string` function there.** Jira: DB-12465 Test Plan: ./yb_build.sh --java-test 'org.yb.pgsql.TestPgRegressThirdPartyExtensionsPgHintPlan' Jenkins: test regex: .*TestPgRegressThirdPartyExtensionsPgHintPlan Reviewers: tnayak Reviewed By: tnayak Subscribers: smishra, fizaa, yql Differential Revision: https://phorge.dev.yugabyte.com/D38018 --- .../pg_hint_plan/expected/yb_hint_table.out | 10 ++++++++++ .../third-party-extensions/pg_hint_plan/pg_hint_plan.c | 6 ++++++ .../pg_hint_plan/sql/yb_hint_table.sql | 7 +++++++ .../third-party-extensions/pg_hint_plan/yb_schedule | 1 + 4 files changed, 24 insertions(+) create mode 100644 src/postgres/third-party-extensions/pg_hint_plan/expected/yb_hint_table.out create mode 100644 src/postgres/third-party-extensions/pg_hint_plan/sql/yb_hint_table.sql diff --git a/src/postgres/third-party-extensions/pg_hint_plan/expected/yb_hint_table.out b/src/postgres/third-party-extensions/pg_hint_plan/expected/yb_hint_table.out new file mode 100644 index 000000000000..8b674897720a --- /dev/null +++ b/src/postgres/third-party-extensions/pg_hint_plan/expected/yb_hint_table.out @@ -0,0 +1,10 @@ +CREATE EXTENSION pg_hint_plan; +SET pg_hint_plan.enable_hint_table TO on; +-- #23547 Ensure query against a view containing stored function doesn't crash +SELECT numeric_precision FROM information_schema.columns + WHERE table_schema = 'pg_catalog' AND table_name = 'pg_class' + AND column_name = 'relname'; + numeric_precision +------------------- + +(1 row) diff --git a/src/postgres/third-party-extensions/pg_hint_plan/pg_hint_plan.c 
b/src/postgres/third-party-extensions/pg_hint_plan/pg_hint_plan.c index 32ea15ed1bd2..44d86ea63497 100644 --- a/src/postgres/third-party-extensions/pg_hint_plan/pg_hint_plan.c +++ b/src/postgres/third-party-extensions/pg_hint_plan/pg_hint_plan.c @@ -1931,6 +1931,12 @@ get_query_string(ParseState *pstate, Query *query, Query **jumblequery) else if (!jumblequery && pstate && pstate->p_sourcetext != p && strcmp(pstate->p_sourcetext, p) != 0) p = NULL; + /* + * YB note: don't assume it is the top-level query when pstate is NULL and + * the query tree does not have the source location. + */ + else if (!pstate && query->stmt_location <= 0 && query->stmt_len <= 0) + p = NULL; return p; } diff --git a/src/postgres/third-party-extensions/pg_hint_plan/sql/yb_hint_table.sql b/src/postgres/third-party-extensions/pg_hint_plan/sql/yb_hint_table.sql new file mode 100644 index 000000000000..62dfe9910415 --- /dev/null +++ b/src/postgres/third-party-extensions/pg_hint_plan/sql/yb_hint_table.sql @@ -0,0 +1,7 @@ +CREATE EXTENSION pg_hint_plan; +SET pg_hint_plan.enable_hint_table TO on; + +-- #23547 Ensure query against a view containing stored function doesn't crash +SELECT numeric_precision FROM information_schema.columns + WHERE table_schema = 'pg_catalog' AND table_name = 'pg_class' + AND column_name = 'relname'; diff --git a/src/postgres/third-party-extensions/pg_hint_plan/yb_schedule b/src/postgres/third-party-extensions/pg_hint_plan/yb_schedule index 8c65d590652e..9b3dab1d4b22 100644 --- a/src/postgres/third-party-extensions/pg_hint_plan/yb_schedule +++ b/src/postgres/third-party-extensions/pg_hint_plan/yb_schedule @@ -2,3 +2,4 @@ test: yb_pg_init test: yb_pg_pg_hint_plan +test: yb_hint_table From 5951e18fd491368c3db57c8fb426f64e4d649a0c Mon Sep 17 00:00:00 2001 From: qhu Date: Wed, 11 Sep 2024 18:10:00 +0000 Subject: [PATCH 64/75] [#23881] docdb: Update the hint to advisory_locks Summary: With #19974, the requests for advisory locks fail with an error message. 
This diff updates the hint in the "advisory locks are not yet implemented" error message to indicate that there is a workaround in case the app doesn't need the strict functionality and the GH issue #3642 has the details of the workaround. Jira: DB-12785 Test Plan: ./yb_build.sh --cxx-test pgwrapper_pg_row_lock-test --gtest_filter PgRowLockTest.AdvisoryLocksNotSupported Reviewers: rthallam, smishra, pjain Reviewed By: rthallam, smishra Subscribers: yql, ybase Differential Revision: https://phorge.dev.yugabyte.com/D37988 --- src/postgres/src/backend/utils/adt/lockfuncs.c | 5 +++-- src/postgres/src/backend/utils/misc/guc.c | 3 ++- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/src/postgres/src/backend/utils/adt/lockfuncs.c b/src/postgres/src/backend/utils/adt/lockfuncs.c index b79af5ec785b..43ffceaf1d6f 100644 --- a/src/postgres/src/backend/utils/adt/lockfuncs.c +++ b/src/postgres/src/backend/utils/adt/lockfuncs.c @@ -32,8 +32,9 @@ YbPreventAdvisoryLocks(void) (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("advisory locks are not yet implemented"), errhint( - "See https://github.com/yugabyte/yugabyte-db/issues/3642. " - "React with thumbs up to raise its priority"))); + "If the app doesn't need strict functionality, this error can be silenced " + "by using the GFlag yb_silence_advisory_locks_not_supported_error. " + "See https://github.com/yugabyte/yugabyte-db/issues/3642 for details."))); } /* This must match enum LockTagType! */ diff --git a/src/postgres/src/backend/utils/misc/guc.c b/src/postgres/src/backend/utils/misc/guc.c index 95323cebf7df..bd0f59fa8262 100644 --- a/src/postgres/src/backend/utils/misc/guc.c +++ b/src/postgres/src/backend/utils/misc/guc.c @@ -2068,7 +2068,8 @@ static struct config_bool ConfigureNamesBool[] = "Enable this with high caution. It was added to avoid disruption for users who were " "already using advisory locks but seeing success messages without the lock really being " "acquired. 
Such users should take the necessary steps to modify their application to " - "remove usage of advisory locks."), + "remove usage of advisory locks. See https://github.com/yugabyte/yugabyte-db/issues/3642 " + "for details."), GUC_NOT_IN_SAMPLE }, &yb_silence_advisory_locks_not_supported_error, From 10b500932b13408c1a8d1bfb2c2678ad57cea866 Mon Sep 17 00:00:00 2001 From: Premkumar Date: Tue, 17 Sep 2024 20:51:28 -0700 Subject: [PATCH 65/75] Minorfixes (#23986) * minor fixes * fix broken links * Apply suggestions from code review Co-authored-by: Dwight Hodge <79169168+ddhodge@users.noreply.github.com> --------- Co-authored-by: Dwight Hodge <79169168+ddhodge@users.noreply.github.com> --- docs/content/preview/drivers-orms/go/pg.md | 2 +- .../content/preview/drivers-orms/orms/rust/ysql-diesel.md | 2 +- docs/content/preview/faq/general.md | 8 ++++---- .../manage/data-migration/migrate-from-postgres.md | 8 ++++---- .../quick-start-buildapps-include.md | 2 +- .../prepare/server-nodes-software/_index.md | 2 +- 6 files changed, 12 insertions(+), 12 deletions(-) diff --git a/docs/content/preview/drivers-orms/go/pg.md b/docs/content/preview/drivers-orms/go/pg.md index 6faab66f2cfa..89dbf7e2fb88 100644 --- a/docs/content/preview/drivers-orms/go/pg.md +++ b/docs/content/preview/drivers-orms/go/pg.md @@ -84,7 +84,7 @@ db := pg.Connect(opt) #### Use SSL -For a YugabyteDB Aeon cluster, or a YugabyteDB cluster with SSL/TLS enabled, set the following SSL-related environment variables at the client side. SSL/TLS is enabled by default for client-side authentication. Refer to [Configure SSL/TLS](yb-pgx-reference/#configure-ssl-tls) for the default and supported modes. +For a YugabyteDB Aeon cluster, or a YugabyteDB cluster with SSL/TLS enabled, set the following SSL-related environment variables at the client side. SSL/TLS is enabled by default for client-side authentication. Refer to [Configure SSL/TLS](../yb-pgx-reference/#configure-ssl-tls) for the default and supported modes. 
```sh $ export PGSSLMODE=verify-ca diff --git a/docs/content/preview/drivers-orms/orms/rust/ysql-diesel.md b/docs/content/preview/drivers-orms/orms/rust/ysql-diesel.md index 7e2948ea973d..5fef2307a0bd 100644 --- a/docs/content/preview/drivers-orms/orms/rust/ysql-diesel.md +++ b/docs/content/preview/drivers-orms/orms/rust/ysql-diesel.md @@ -45,7 +45,7 @@ Build the REST API server (written using Diesel and Rocket) as follows: $ cargo build --release ``` -If you encounter a build failure, install [libpq](../../ysql-client-drivers/#libpq) and try again. +If you encounter a build failure, install [libpq](../../../ysql-client-drivers/#libpq) and try again. ## Set up the database connection diff --git a/docs/content/preview/faq/general.md b/docs/content/preview/faq/general.md index 2c405c8b3b53..fc59c6f4286f 100644 --- a/docs/content/preview/faq/general.md +++ b/docs/content/preview/faq/general.md @@ -97,19 +97,19 @@ Ensuring [ACID](../../architecture/key-concepts/#acid) transactions and full com - **Consistency vs. Latency**: YugabyteDB uses the [Raft](../../architecture/docdb-replication/raft) consensus algorithm for strong consistency in distributed systems. While this guarantees data integrity, it can result in higher write latency compared to eventually consistent databases like Cassandra. -- **Increased Query Latency**: Transactions and JOINs that span multiple nodes experience inter-node latency, making queries slower than in single-node databases like PostgreSQL. +- **Data Distribution vs Query Latency**: Transactions and JOINs that span multiple nodes experience inter-node latency, making queries slower than in single-node databases like PostgreSQL. {{}} [Many projects](https://github.com/yugabyte/yugabyte-db?tab=readme-ov-file#current-roadmap) are currently in progress to match the performance of a single-node database. 
{{}} -- **Cross-Region Latency**: In multi-region or globally distributed setups, YugabyteDB replicates data across regions to ensure availability and resilience. However, this can lead to higher write latency due to cross-region coordination. +- **Multi-Region vs Latency**: In multi-region or globally distributed setups, YugabyteDB replicates data across regions to ensure availability and resilience. However, this can lead to higher write latency due to cross-region coordination. -- **Resource Requirements**: Being a distributed database, YugabyteDB demands more hardware and networking resources to maintain high availability and fault tolerance compared to traditional monolithic databases that run on a single machine. +- **Availability vs Resource Requirements**: Being a distributed database, YugabyteDB demands more hardware and networking resources to maintain high availability and fault tolerance compared to traditional monolithic databases that run on a single machine. - **PostgreSQL Feature Support**: Every new PostgreSQL feature must be optimized for distributed environments, which is not a simple task. Be sure to verify that the PostgreSQL features your application relies on are supported in the current version of YugabyteDB. -{{}} + {{}} ### What is a YugabyteDB universe diff --git a/docs/content/preview/manage/data-migration/migrate-from-postgres.md b/docs/content/preview/manage/data-migration/migrate-from-postgres.md index d221187827dd..16138aa6930f 100644 --- a/docs/content/preview/manage/data-migration/migrate-from-postgres.md +++ b/docs/content/preview/manage/data-migration/migrate-from-postgres.md @@ -225,7 +225,7 @@ Regardless of how much data you decide to migrate, you can choose from the follo For more details, see [Offline migration](/preview/yugabyte-voyager/migrate/migrate-steps/). {{}} -**Live migration**: Live migration aims to minimize downtime by keeping the application running during the migration process. 
Data is copied from the source database to the target database while the application is still live, and a final switchover is made after the migration is complete. +**Live migration**: Live migration aims to minimize downtime by keeping the application running during the migration process. Data is copied from the source database to the new YugabyteDB cluster while the application is still live, and a final switchover is made after the migration is complete. {{}} For more details, see [Live migration](/preview/yugabyte-voyager/migrate/live-migrate/). @@ -237,7 +237,7 @@ For more details, see [Live migration](/preview/yugabyte-voyager/migrate/live-mi For more details, see [Live migration with fall-forward](/preview/yugabyte-voyager/migrate/live-fall-forward/). {{}} -**Live migration with fall-back**: Live migration with fall-back provides a safety net by allowing a return to the original database if issues are encountered after the cutover to the new database. This strategy involves maintaining bidirectional synchronization between the source and target databases for a period after the migration. +**Live migration with fall-back**: Live migration with fall-back provides a safety net by allowing a return to the original database if issues are encountered after the cutover to the new database. This strategy involves maintaining bidirectional synchronization between the source database and the new YugabyteDB cluster for a period after the migration. {{}} For more details, see [Live migration with fall-back](/preview/yugabyte-voyager/migrate/live-fall-back/). @@ -294,7 +294,7 @@ For more information, see [Verify migration](../verify-migration-ysql/). ### Monitoring -Regularly monitor the target database to ensure it is performing efficiently. This includes tracking metrics such as query execution times, CPU usage, memory consumption, and disk I/O. 
Pay close attention to any errors or warnings that arise, as they can indicate potential issues with the database configuration, queries, or underlying infrastructure. +Regularly monitor the new YugabyteDB cluster to ensure it is performing efficiently. This includes tracking metrics such as query execution times, CPU usage, memory consumption, and disk I/O. Pay close attention to any errors or warnings that arise, as they can indicate potential issues with the database configuration, queries, or underlying infrastructure. {{}} To learn more about the various useful metrics that can be monitored, see [Metrics](../../../launch-and-manage/monitor-and-alert/metrics/). @@ -327,7 +327,7 @@ To understand the various schemes of backup, see [Backup and restore](../../back ### Decommissioning -Before proceeding with decommissioning, thoroughly verify the stability and reliability of the target database. Ensure that it is functioning as expected, with no critical errors, performance issues, or compatibility problems. Once confident in the stability of the target database and after securing necessary backups, proceed with decommissioning the source database. This involves shutting down the source system and ensuring that it is no longer accessible to users or applications. +Before proceeding with decommissioning, thoroughly verify the stability and reliability of the new YugabyteDB cluster. Ensure that it is functioning as expected, with no critical errors, performance issues, or compatibility problems. Once confident in the stability of the new YugabyteDB cluster and after securing necessary backups, proceed with decommissioning the source database. This involves shutting down the source system and ensuring that it is no longer accessible to users or applications. 
## Learn more diff --git a/docs/content/preview/quick-start-yugabytedb-managed/quick-start-buildapps-include.md b/docs/content/preview/quick-start-yugabytedb-managed/quick-start-buildapps-include.md index 272a24e8aadf..b99be70db1a7 100644 --- a/docs/content/preview/quick-start-yugabytedb-managed/quick-start-buildapps-include.md +++ b/docs/content/preview/quick-start-yugabytedb-managed/quick-start-buildapps-include.md @@ -605,7 +605,7 @@ You have successfully executed a basic Ruby application that works with Yugabyte {{% tab header="Rust" lang="rust" %}} -The [Rust application](https://github.com/yugabyte/yugabyte-simple-rust-app) connects to a YugabyteDB cluster using the [Rust-Postgres driver](/preview/drivers-orms/yb-rust-postgres/) and performs basic SQL operations. Use the application as a template to get started with YugabyteDB in Rust. +The [Rust application](https://github.com/yugabyte/yugabyte-simple-rust-app) connects to a YugabyteDB cluster using the [Rust-Postgres driver](/preview/drivers-orms/rust/yb-rust-postgres/) and performs basic SQL operations. Use the application as a template to get started with YugabyteDB in Rust. The application requires the following: diff --git a/docs/content/preview/yugabyte-platform/prepare/server-nodes-software/_index.md b/docs/content/preview/yugabyte-platform/prepare/server-nodes-software/_index.md index 79af7090e5de..8e149aabf35f 100644 --- a/docs/content/preview/yugabyte-platform/prepare/server-nodes-software/_index.md +++ b/docs/content/preview/yugabyte-platform/prepare/server-nodes-software/_index.md @@ -22,7 +22,7 @@ Depending on the [provider type](../../yba-overview/#provider-configurations) an {{< warning title="Using disk encryption software with YugabyteDB" >}} If you are using third party disk encryption software, such as Vormetric or CipherTrust, the disk encryption service must be up and running on the node before starting any YugabyteDB services. 
If YugabyteDB processes start _before_ the encryption service, restarting an already encrypted node can result in data corruption. -To avoid problems, [pause the universe](../../../manage-deployments/delete-universe/#pause-a-universe) _before_ enabling or disabling the disk encryption service on universe nodes. +To avoid problems, [pause the universe](../../manage-deployments/delete-universe/#pause-a-universe) _before_ enabling or disabling the disk encryption service on universe nodes. {{< /warning >}} ##### Linux OS From 3d33b3eebb6cccb07be4ad218f2622b068f39331 Mon Sep 17 00:00:00 2001 From: Shubham Date: Tue, 17 Sep 2024 13:41:09 +0530 Subject: [PATCH 66/75] [PLAt-15133][PLAT-15332] Fix the preflight check for disk mount Summary: This diff fixes - 1. Preflight checks for disk mount. 2. Formatting for preflight checks. Test Plan: Manually verified the preflight checks Reviewers: anijhawan, nbhatia, skhilar Reviewed By: anijhawan, skhilar Subscribers: yugaware Differential Revision: https://phorge.dev.yugabyte.com/D38117 --- .../ynp/commands/provision_command.py | 10 +++--- .../configure_os/templates/precheck.j2 | 31 ++++++++++--------- 2 files changed, 22 insertions(+), 19 deletions(-) diff --git a/managed/node-agent/resources/ynp/commands/provision_command.py b/managed/node-agent/resources/ynp/commands/provision_command.py index c2644681290b..c3467a7cd26b 100644 --- a/managed/node-agent/resources/ynp/commands/provision_command.py +++ b/managed/node-agent/resources/ynp/commands/provision_command.py @@ -76,16 +76,16 @@ def _run_script(self, script_path): def add_results_helper(self, file): file.write(""" # Initialize the JSON results array - json_results='{"results":[' + json_results='{\n"results":[\n' add_result() { local check="$1" local result="$2" local message="$3" - if [ "${#json_results}" -gt 12 ]; then - json_results+=',' + if [ "${#json_results}" -gt 20 ]; then + json_results+=',\n' fi - 
json_results+='{"check":"'$check'","result":"'$result'","message":"'$message'"}' + json_results+=' {\n "check": "'$check'",\n "result": "'$result'",\n "message": "'$message'"\n }' } """) @@ -96,7 +96,7 @@ def print_results_helper(self, file): if [[ $json_results == *'"result":"FAIL"'* ]]; then any_fail=1 fi - json_results+=']}' + json_results+='\n]}' # Output the JSON echo "$json_results" diff --git a/managed/node-agent/resources/ynp/modules/provision/configure_os/templates/precheck.j2 b/managed/node-agent/resources/ynp/modules/provision/configure_os/templates/precheck.j2 index 631de4af7319..5fe1ec0513b6 100644 --- a/managed/node-agent/resources/ynp/modules/provision/configure_os/templates/precheck.j2 +++ b/managed/node-agent/resources/ynp/modules/provision/configure_os/templates/precheck.j2 @@ -89,9 +89,10 @@ fi threshold=49 #Gigabytes # Convert the space-separated string to an array in bash -IFS=' ' read -r -a mount_points_array <<< {{ mount_points }} +mount_points="{{ mount_points | default('') | trim }}" +IFS=' ' read -ra mount_points_array <<< "$mount_points" # Verify each mount point -for mount_point in "${mount_point_array[@]}"; do +for mount_point in "${mount_points_array[@]}"; do if [ -d "$mount_point" ]; then if [ -w "$mount_point" ] && [ $(( $(stat -c %a "$mount_point") % 10 & 2 )) -ne 0 ]; then result="PASS" @@ -111,19 +112,21 @@ for mount_point in "${mount_point_array[@]}"; do fi add_result "$mount_point Check" "$result" "$message" - # Get the available disk space in gigabytes. - free_space_gb=$(df -BG --output=avail "$MOUNT_POINT" | tail -n 1 | tr -d 'G ') - if [ "$free_space_gb" -gt "$threshold" ]; then - result="PASS" - message="Sufficient disk space available: ${AVAILABLE}G" - echo "[PASS] $message" - else - result="FAIL" - message="Insufficient disk space: ${free_space_gb}G available, ${threshold}G required" - echo "[FAIL] $message" - any_fail=1 + if [ -d "$mount_point" ]; then + # Get the available disk space in gigabytes. 
+ free_space_gb=$(df -BG --output=avail "$mount_point" | tail -n 1 | tr -d 'G ') + if [ "$free_space_gb" -gt "$threshold" ]; then + result="PASS" + message="Sufficient disk space available: ${free_space_gb}G" + echo "[PASS] $message" + else + result="FAIL" + message="Insufficient disk space: ${free_space_gb}G available, ${threshold}G required" + echo "[FAIL] $message" + any_fail=1 + fi + add_result "$mount_point Free space check" "$result" "$message" fi - add_result "$mount_point Free space check" "$result" "$message" done platform_id=$(grep -oP '(?<=^PLATFORM_ID=).+' /etc/os-release | tr -d '"') From 9d8366b9d1a0245a6a130349b0b1359ddf4c698c Mon Sep 17 00:00:00 2001 From: Shubham Date: Tue, 17 Sep 2024 17:21:09 +0530 Subject: [PATCH 67/75] [PLAT-15345] Set 755 permissions on node-agent service file Summary: Node agent systemd service not restarting post node reboot in case of YNP deployment. This diff sets the permission to 755 on the node-agent service file. Test Plan: Verified node-agent is running post VM reboot Reviewers: anijhawan, nbhatia, nsingh, skhilar Reviewed By: nsingh Subscribers: yugaware Differential Revision: https://phorge.dev.yugabyte.com/D38119 --- managed/node-agent/resources/node-agent-installer.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/managed/node-agent/resources/node-agent-installer.sh b/managed/node-agent/resources/node-agent-installer.sh index bd04144a6be1..15c3cc9eb3bd 100755 --- a/managed/node-agent/resources/node-agent-installer.sh +++ b/managed/node-agent/resources/node-agent-installer.sh @@ -357,6 +357,9 @@ EOF [Install] WantedBy=multi-user.target EOF + # Set the permissions after file creation. This is needed so that the service file + # is executable during restart of systemd unit. 
+ chmod 755 "$SERVICE_FILE_PATH" fi echo "* Starting the systemd service" From d298d4406060fa07aa9e714337bda1d4012ac98e Mon Sep 17 00:00:00 2001 From: Bvsk Patnaik Date: Thu, 29 Aug 2024 08:15:22 -0700 Subject: [PATCH 68/75] [#22135] YSQL: Avoid read restart errors with ANALYZE Summary: In the current state of the database, ANALYZE can run for a long time on large tables. This long duration increases the chances of errors. We want to minimize such error situations since running analyze again when there is an error can be expensive. First, we tackle the read restart errors. ANALYZE does not require strict read-after-commit-visibility guarantee, i.e. slightly stale reads are not an issue for the ANALYZE operation. Therefore, we want to avoid these errors for ANALYZE in particular. For this reason, we do not use an ambiguity window (i.e. collapse the ambiguity window to a single point) for ANALYZE. Moreover, in the current state of the database, DDLs are executed in a "special" transaction separate from the usual transaction code path. This means that multi-table ANALYZE operations such as `ANALYZE;` use a single read point for the entirety of the operation. This is undesirable since there may be a lot of tables in the database and that increases the risk of a snapshot too old error. For this reason, we explicitly pass a fresh read time for ANALYZE of each table from the Pg layer to the tserver proxy. Pg does not exhibit this problem since (a) it runs ANALYZE of each table in a separate transaction (b) it does not cleanup MVCC records that are in use. Jira: DB-11062 Test Plan: Jenkins #### Test 1 ``` ./yb_build.sh --cxx-test pg_analyze_read_time-test --gtest_filter PgAnalyzeReadTimeTest.InsertRowsConcurrentlyWithAnalyze ``` Insert rows concurrently with analyze to trigger read restart errors. 
#### Test 2 ``` ./yb_build.sh --cxx-test pg_analyze_read_time-test --gtest_filter PgAnalyzeReadTimeTest.AnalyzeMultipleTables ``` Analyze two tables and do a full compaction between the two analyze. #### Test 3 ```lang=sh $ ./bin/ysqlsh yugabyte=# create table keys(k int); CREATE TABLE ... concurrently insert rows using ysql_bench and wait for a while yugabyte=# analyze keys; ... fails with a read restart error prior to this change but not with this change. ``` To insert rows concurrently use the following sql script ```name=insert.sql,lang=sql \set random_id random(1, 1000000) INSERT INTO keys (k) VALUES (:random_id); ``` Ran ysql_bench using ```lang=sh build/latest/postgres/bin/ysql_bench -t 100000 -f ../insert.sql -n -R 200 ``` Reviewers: pjain, bkolagani, yguan Reviewed By: bkolagani, yguan Subscribers: ybase, smishra, svc_phabricator, steve.varnau, yql Differential Revision: https://phorge.dev.yugabyte.com/D37648 --- src/yb/yql/pggate/pg_sample.cc | 33 ++++- src/yb/yql/pggate/pg_sample.h | 7 +- src/yb/yql/pggate/pggate.cc | 3 +- src/yb/yql/pgwrapper/CMakeLists.txt | 1 + .../pgwrapper/pg_analyze_read_time-test.cc | 137 ++++++++++++++++++ 5 files changed, 173 insertions(+), 8 deletions(-) create mode 100644 src/yb/yql/pgwrapper/pg_analyze_read_time-test.cc diff --git a/src/yb/yql/pggate/pg_sample.cc b/src/yb/yql/pggate/pg_sample.cc index 473c0912a6ba..1c610f9e0d0e 100644 --- a/src/yb/yql/pggate/pg_sample.cc +++ b/src/yb/yql/pggate/pg_sample.cc @@ -20,8 +20,15 @@ #include #include +#include "yb/common/read_hybrid_time.h" + +#include "yb/util/atomic.h" + #include "yb/gutil/casts.h" +DEFINE_test_flag(int64, delay_after_table_analyze_ms, 0, + "Add this delay after each table is analyzed."); + namespace yb::pggate { // Internal class to work as the secondary_index_query_ to select sample tuples. 
@@ -30,8 +37,9 @@ namespace yb::pggate { class PgSamplePicker : public PgSelectIndex { public: PgSamplePicker( - PgSession::ScopedRefPtr pg_session, const PgObjectId& table_id, bool is_region_local) - : PgSelectIndex(std::move(pg_session), table_id, is_region_local) {} + PgSession::ScopedRefPtr pg_session, const PgObjectId& table_id, bool is_region_local, + HybridTime read_time) + : PgSelectIndex(std::move(pg_session), table_id, is_region_local), read_time_(read_time) {} Status Prepare() override { target_ = PgTable(VERIFY_RESULT(LoadTable())); @@ -39,6 +47,9 @@ class PgSamplePicker : public PgSelectIndex { auto read_op = ArenaMakeShared( arena_ptr(), &arena(), *target_, is_region_local_, pg_session_->metrics().metrics_capture()); + // Use the same time as PgSample. Otherwise, ybctids may be gone + // when PgSample tries to fetch the rows. + read_op->set_read_time(ReadHybridTime::SingleTime(read_time_)); read_req_ = std::shared_ptr(read_op, &read_op->read_request()); doc_op_ = std::make_shared(pg_session_, &target_, std::move(read_op)); return Status::OK(); @@ -100,6 +111,7 @@ class PgSamplePicker : public PgSelectIndex { } Result GetEstimatedRowCount() const { + AtomicFlagSleepMs(&FLAGS_TEST_delay_after_table_analyze_ms); return down_cast(doc_op_.get())->GetEstimatedRowCount(); } @@ -110,12 +122,15 @@ class PgSamplePicker : public PgSelectIndex { bool reservoir_ready_ = false; // Vector of Slices pointing to the values in the reservoir std::vector ybctids_; + // Use the same read time on the ybctid sampler as the row fetcher. 
+ HybridTime read_time_; }; PgSample::PgSample( PgSession::ScopedRefPtr pg_session, - int targrows, const PgObjectId& table_id, bool is_region_local) - : PgDmlRead(pg_session, table_id, is_region_local), targrows_(targrows) {} + int targrows, const PgObjectId& table_id, bool is_region_local, HybridTime read_time) + : PgDmlRead(pg_session, table_id, is_region_local), targrows_(targrows), + read_time_(read_time) {} Status PgSample::Prepare() { // Setup target and bind descriptor. @@ -124,16 +139,23 @@ Status PgSample::Prepare() { // Setup sample picker as secondary index query secondary_index_query_ = std::make_unique( - pg_session_, table_id_, is_region_local_); + pg_session_, table_id_, is_region_local_, read_time_); RETURN_NOT_OK(secondary_index_query_->Prepare()); // Prepare read op to fetch rows auto read_op = ArenaMakeShared( arena_ptr(), &arena(), *target_, is_region_local_, pg_session_->metrics().metrics_capture()); + // Clamp the read uncertainty window to avoid read restart errors. 
+ read_op->set_read_time(ReadHybridTime::SingleTime(read_time_)); read_req_ = std::shared_ptr(read_op, &read_op->read_request()); doc_op_ = make_shared(pg_session_, &target_, std::move(read_op)); + VLOG_WITH_FUNC(3) + << "Sampling table: " << target_->table_name().table_name() + << " for " << targrows_ << " rows" + << " using read time: " << read_time_; + return Status::OK(); } @@ -157,4 +179,3 @@ Result PgSample::SamplePicker() { } } // namespace yb::pggate - diff --git a/src/yb/yql/pggate/pg_sample.h b/src/yb/yql/pggate/pg_sample.h index e3a071159132..71d514b44f7b 100644 --- a/src/yb/yql/pggate/pg_sample.h +++ b/src/yb/yql/pggate/pg_sample.h @@ -14,6 +14,8 @@ #pragma once +#include "yb/common/hybrid_time.h" + #include "yb/yql/pggate/pg_select_index.h" #include "yb/yql/pggate/pg_tools.h" @@ -29,7 +31,7 @@ class PgSample : public PgDmlRead { public: PgSample( PgSession::ScopedRefPtr pg_session, int targrows, const PgObjectId& table_id, - bool is_region_local); + bool is_region_local, HybridTime read_time); StmtOp stmt_op() const override { return StmtOp::STMT_SAMPLE; } @@ -52,6 +54,9 @@ class PgSample : public PgDmlRead { // How many sample rows are needed const int targrows_; + + // Holds the read time used for executing ANALYZE on the table. 
+ HybridTime read_time_; }; } // namespace yb::pggate diff --git a/src/yb/yql/pggate/pggate.cc b/src/yb/yql/pggate/pggate.cc index 02835a0876e1..975682e05618 100644 --- a/src/yb/yql/pggate/pggate.cc +++ b/src/yb/yql/pggate/pggate.cc @@ -1411,7 +1411,8 @@ Status PgApiImpl::NewSample(const PgObjectId& table_id, bool is_region_local, PgStatement **handle) { *handle = nullptr; - auto sample = std::make_unique(pg_session_, targrows, table_id, is_region_local); + auto sample = std::make_unique(pg_session_, targrows, table_id, is_region_local, + clock_->Now()); RETURN_NOT_OK(sample->Prepare()); RETURN_NOT_OK(AddToCurrentPgMemctx(std::move(sample), handle)); return Status::OK(); diff --git a/src/yb/yql/pgwrapper/CMakeLists.txt b/src/yb/yql/pgwrapper/CMakeLists.txt index 35e438cb1b16..028116c44478 100644 --- a/src/yb/yql/pgwrapper/CMakeLists.txt +++ b/src/yb/yql/pgwrapper/CMakeLists.txt @@ -113,6 +113,7 @@ ADD_YB_TEST(colocation-test) ADD_YB_TEST(geo_transactions-test) ADD_YB_TEST(geo_transactions_promotion-test) ADD_YB_TEST(pg_alter_add_column_default-test) +ADD_YB_TEST(pg_analyze_read_time-test) ADD_YB_TEST(pg_ash-test) ADD_YB_TEST(pg_auto_analyze-test) ADD_YB_TEST(pg_backends-test) diff --git a/src/yb/yql/pgwrapper/pg_analyze_read_time-test.cc b/src/yb/yql/pgwrapper/pg_analyze_read_time-test.cc new file mode 100644 index 000000000000..15a43c5d9bd0 --- /dev/null +++ b/src/yb/yql/pgwrapper/pg_analyze_read_time-test.cc @@ -0,0 +1,137 @@ +// Copyright (c) YugabyteDB, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software distributed under the License +// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express +// or implied. 
See the License for the specific language governing permissions and limitations +// under the License. + +#include +#include + +#include "yb/common/pgsql_error.h" +#include "yb/util/flags.h" + +#include "yb/yql/pgwrapper/pg_mini_test_base.h" +#include "yb/yql/pgwrapper/pg_test_utils.h" + +DECLARE_string(ysql_pg_conf_csv); +DECLARE_string(ysql_log_statement); +DECLARE_bool(ysql_beta_features); +DECLARE_string(vmodule); +DECLARE_int32(timestamp_history_retention_interval_sec); +DECLARE_int64(TEST_delay_after_table_analyze_ms); + +namespace yb::pgwrapper { + +class PgAnalyzeReadTimeTest : public PgMiniTestBase { + public: + void SetUp() override { + // ANALYZE is a beta feature. + ANNOTATE_UNPROTECTED_WRITE(FLAGS_ysql_beta_features) = true; + // Easier debugging. + // ASSERT_OK(SET_FLAG(vmodule, "read_query=1")); + ANNOTATE_UNPROTECTED_WRITE(FLAGS_ysql_log_statement) = "all"; + PgMiniTestBase::SetUp(); + } +}; + +class PgAnalyzeNoReadRestartsTest : public PgAnalyzeReadTimeTest { + public: + void SetUp() override { + // So that read restart errors are not retried internally. + ANNOTATE_UNPROTECTED_WRITE(FLAGS_ysql_pg_conf_csv) = + MaxQueryLayerRetriesConf(0); + PgAnalyzeReadTimeTest::SetUp(); + } +}; + +TEST_F_EX(PgAnalyzeReadTimeTest, InsertRowsConcurrentlyWithAnalyze, PgAnalyzeNoReadRestartsTest) { + constexpr auto kNumInitialRows = 100000; + + // Create table with keys from 1 to kNumInitialRows. + auto setup_conn = ASSERT_RESULT(Connect()); + ASSERT_OK(setup_conn.Execute("CREATE TABLE keys (k INT) SPLIT INTO 3 TABLETS")); + ASSERT_OK(setup_conn.ExecuteFormat( + "INSERT INTO keys(k) SELECT GENERATE_SERIES(1, $0)", kNumInitialRows)); + + // Warm the catalog cache so that subsequent inserts are fast. + // Unfortunately, this is necessary because this test depends on timing. + auto insert_conn = ASSERT_RESULT(Connect()); + auto key = kNumInitialRows; + // Populates catalog cache. 
+ key++; + ASSERT_OK(insert_conn.ExecuteFormat( + "INSERT INTO keys(k) VALUES ($0)", key)); + + std::atomic stop{false}; + CountDownLatch begin_analyze(1); + auto analyze_conn = ASSERT_RESULT(Connect()); + auto analyze_status_future = std::async(std::launch::async, [&] { + begin_analyze.Wait(); + auto status = analyze_conn.Execute("ANALYZE keys"); + stop.store(true); + return status; + }); + + begin_analyze.CountDown(); + while (!stop.load() && key < kNumInitialRows + 100) { + key++; + ASSERT_OK(insert_conn.ExecuteFormat( + "INSERT INTO keys(k) VALUES ($0)", key)); + + // Throttle inserts to avoid overloading the system. + std::this_thread::sleep_for(10ms); + } + + ASSERT_OK(analyze_status_future.get()); +} + +class PgAnalyzeMultiTableTest : public PgAnalyzeReadTimeTest { + public: + void SetUp() override { + ANNOTATE_UNPROTECTED_WRITE( + FLAGS_timestamp_history_retention_interval_sec) = 0; + // This test is timing based and 10s provides enough time for compaction. + ANNOTATE_UNPROTECTED_WRITE(FLAGS_TEST_delay_after_table_analyze_ms) = 10000; + PgAnalyzeReadTimeTest::SetUp(); + } +}; + +TEST_F_EX(PgAnalyzeReadTimeTest, AnalyzeMultipleTables, PgAnalyzeMultiTableTest) { + constexpr auto kNumInitialRows = 10000; + + // Create table with keys from 1 to kNumInitialRows. 
+ auto setup_conn = ASSERT_RESULT(Connect()); + ASSERT_OK(setup_conn.Execute("CREATE TABLE keys (k INT)")); + ASSERT_OK(setup_conn.ExecuteFormat( + "INSERT INTO keys(k) SELECT GENERATE_SERIES(1, $0)", kNumInitialRows)); + ASSERT_OK(setup_conn.Execute("CREATE TABLE values (v INT)")); + ASSERT_OK(setup_conn.ExecuteFormat( + "INSERT INTO values(v) SELECT GENERATE_SERIES(1, $0)", kNumInitialRows)); + + auto update_conn = ASSERT_RESULT(Connect()); + auto analyze_conn = ASSERT_RESULT(Connect()); + + CountDownLatch update_thread_started(1); + auto update_status_future = std::async(std::launch::async, [&] { + update_thread_started.CountDown(); + auto status = update_conn.Execute("UPDATE values SET v = v + 1"); + FlushAndCompactTablets(); + LOG(INFO) << "Compaction done!"; + return status; + }); + + update_thread_started.Wait(); + auto analyze_status = analyze_conn.Execute("ANALYZE keys, values"); + ASSERT_OK(analyze_status); + LOG(INFO) << "Analyze done!"; + + ASSERT_OK(update_status_future.get()); +} + +} // namespace yb::pgwrapper From 240e8f03ef3edf562e81fb47b5a3543a6dc7874d Mon Sep 17 00:00:00 2001 From: Aman Nijhawan Date: Tue, 17 Sep 2024 22:47:24 +0000 Subject: [PATCH 69/75] [PLAT-15355] Fix Node Addition Precheck logic to work correctly in case provider_id is missing Test Plan: manually tested on a node by patching ynp code. 
Reviewers: svarshney Reviewed By: svarshney Subscribers: svarshney, yugaware Differential Revision: https://phorge.dev.yugabyte.com/D38143 --- .../resources/ynp/configs/config.j2 | 2 +- .../node_agent/templates/precheck.j2 | 23 ++++++++++++++++++- 2 files changed, 23 insertions(+), 2 deletions(-) diff --git a/managed/node-agent/resources/ynp/configs/config.j2 b/managed/node-agent/resources/ynp/configs/config.j2 index fde8056d7396..21bbf3c952c5 100644 --- a/managed/node-agent/resources/ynp/configs/config.j2 +++ b/managed/node-agent/resources/ynp/configs/config.j2 @@ -69,7 +69,7 @@ ports = 7000 7100 9000 9100 18018 22 5433 9042 9070 9300 12000 13000 [InstallNodeAgent] {{ render_section(yba) -}} tmp_directory = {{ ynp.tmp_directory }} -node_ip = {{ ynp.node_external_fqdn }} +node_ip = {{ yba.node_external_fqdn }} bind_ip = {{ ynp.node_ip }} [RebootNode] diff --git a/managed/node-agent/resources/ynp/modules/provision/node_agent/templates/precheck.j2 b/managed/node-agent/resources/ynp/modules/provision/node_agent/templates/precheck.j2 index 972ce4d43cb4..8342a49bdb9a 100644 --- a/managed/node-agent/resources/ynp/modules/provision/node_agent/templates/precheck.j2 +++ b/managed/node-agent/resources/ynp/modules/provision/node_agent/templates/precheck.j2 @@ -3,6 +3,14 @@ yba_url={{ url }} customer_uuid={{ customer_uuid }} token={{ api_key }} provider_id={{ provider_id }} +provider_name={{ provider_name }} + +_get_provider_by_name_url() { + local yba_url=$1 + local customer_uuid=$2 + local provider_name=$3 + echo "${yba_url}/api/v1/customers/${customer_uuid}/providers?name={$provider_name}" +} _get_nodes_in_provider() { local yba_url=$1 @@ -60,12 +68,25 @@ else add_result "Memory Usage Check" "FAIL" "MemoryCurrent is not greater than 0: $memory" fi +if [ -z "$provider_id" ]; then + # Perform GET request to fetch provider to get provider_uuid + get_provider_url=$(_get_provider_by_name_url $yba_url $customer_uuid $provider_name) + response=$(curl -s -w "%{http_code}" -o 
response_provider.json -X GET \ + "${header_options[@]}" $tls_verify_option "$get_provider_url") + http_status="${response:(-3)}" + if [ "$http_status" -ge 200 ] && [ "$http_status" -lt 300 ]; then + echo "HTTP GET request successful. Processing response for provider" + provider_id=$(awk -F'"uuid":' '{print $2}' response_provider.json | awk -F'"' '{print $2}' | head -n 1) + fi +fi + # Perform GET request to fetch all the nodes associated with provider get_nodes_in_provider=$(_get_nodes_in_provider $yba_url $customer_uuid $provider_id) response=$(curl -s -w "%{http_code}" -o response.txt -X GET \ "${header_options[@]}" $tls_verify_option "$get_nodes_in_provider") http_status="${response:(-3)}" response_body=$( Date: Fri, 13 Sep 2024 05:53:55 +0000 Subject: [PATCH 70/75] [PLAT-15262]Add more checks for non-namespace scope supported universes Summary: 1. Added check to ensure non-namespace scope supported universes are not allowed to pass Namespaced services in overrides. 2. Moved the subtask location for Namespaced service handling. Earlier we were re-owning and deleting namespaced services before creating new ones. Now moved it to after new services are created and just before the current pods are deleted. Test Plan: Verified manually using following scenarios: # If universe uses old naming style, verified Namespaced scope services are not allowed in overrides list. # Verified corner case scenario with empty serviceEndpoints array works correctly: `serviceEndpoints: []` # Verified corner case scenario with serviceEndpoints override not applied as overrides works correctly. # Verified the subtask position move from before new Pods creation to after new Pods creation works correctly. 
Reviewers: anijhawan, #yba-api-review Reviewed By: anijhawan, #yba-api-review Subscribers: yugaware Differential Revision: https://phorge.dev.yugabyte.com/D38031 --- .../tasks/EditKubernetesUniverse.java | 16 +++--- .../yugabyte/yw/common/KubernetesUtil.java | 54 ++++++++++++++----- .../handlers/UniverseCRUDHandler.java | 12 ++--- .../tasks/EditKubernetesUniverseTest.java | 8 +-- .../UniverseCreateControllerTestBase.java | 1 + 5 files changed, 61 insertions(+), 30 deletions(-) diff --git a/managed/src/main/java/com/yugabyte/yw/commissioner/tasks/EditKubernetesUniverse.java b/managed/src/main/java/com/yugabyte/yw/commissioner/tasks/EditKubernetesUniverse.java index 8aa41089337b..f55762bc044e 100644 --- a/managed/src/main/java/com/yugabyte/yw/commissioner/tasks/EditKubernetesUniverse.java +++ b/managed/src/main/java/com/yugabyte/yw/commissioner/tasks/EditKubernetesUniverse.java @@ -380,14 +380,6 @@ private boolean editCluster( PlacementInfoUtil.addPlacementZone(currAZs, activeZones); } - // Handle Namespaced services ownership change/delete - addHandleKubernetesNamespacedServices( - false /* readReplicaDelete */, - taskParams(), - taskParams().getUniverseUUID(), - true /* handleOwnershipChanges */) - .setSubTaskGroupType(SubTaskGroupType.KubernetesHandleNamespacedService); - if (!mastersToAdd.isEmpty()) { // Bring up new masters and update the configs. // No need to check mastersToRemove as total number of masters is invariant. 
@@ -484,6 +476,14 @@ private boolean editCluster( createWaitForLoadBalanceTask().setSubTaskGroupType(SubTaskGroupType.WaitForDataMigration); } + // Handle Namespaced services ownership change/delete + addHandleKubernetesNamespacedServices( + false /* readReplicaDelete */, + taskParams(), + taskParams().getUniverseUUID(), + true /* handleOwnershipChanges */) + .setSubTaskGroupType(SubTaskGroupType.KubernetesHandleNamespacedService); + String universeOverrides = primaryCluster.userIntent.universeOverrides; Map azOverrides = primaryCluster.userIntent.azOverrides; if (azOverrides == null) { diff --git a/managed/src/main/java/com/yugabyte/yw/common/KubernetesUtil.java b/managed/src/main/java/com/yugabyte/yw/common/KubernetesUtil.java index eef8d11ad099..ea7f17821f4d 100644 --- a/managed/src/main/java/com/yugabyte/yw/common/KubernetesUtil.java +++ b/managed/src/main/java/com/yugabyte/yw/common/KubernetesUtil.java @@ -39,6 +39,7 @@ import java.util.List; import java.util.Map; import java.util.Map.Entry; +import java.util.Objects; import java.util.Set; import java.util.UUID; import java.util.function.Function; @@ -759,6 +760,9 @@ private static Map> getFinalOverrides( universeOverrides = mapper.readValue(universeOverridesStr, Map.class); } + if (CollectionUtils.isEmpty(placementInfo.cloudList)) { + return result; + } Map cloudConfig = CloudInfoInterface.fetchEnvVars(provider); for (PlacementRegion pr : placementInfo.cloudList.get(0).regionList) { Region region = Region.getOrBadRequest(pr.uuid); @@ -804,7 +808,7 @@ private static Set getNamespacesInCluster( if (cluster.userIntent.providerType != CloudType.kubernetes) { continue; } - if (clusterType != cluster.clusterType) { + if ((clusterType != null) && (clusterType != cluster.clusterType)) { continue; } PlacementInfo pi = cluster.placementInfo; @@ -855,7 +859,7 @@ public static Map>> generateNamespaceAZOve if (cluster.userIntent.providerType != CloudType.kubernetes) { continue; } - if (clusterType != cluster.clusterType) 
{ + if ((clusterType != null) && (clusterType != cluster.clusterType)) { continue; } Map> finalOverrides = @@ -920,8 +924,10 @@ private static Map> getServicesFromOverrides( /** * Generate Map of Namespace and Per-AZ Namespace scoped services. Returns empty map for AZs where - * serviceEndpoints is not defined. Returns null for AZs where serviceEndpoints is defined but - * empty. + * serviceEndpoints is not defined. Returns null for AZs where serviceEndpoints is defined as + * empty array. For helm default overrides( i.e. no overrides specified ):
+ * 1. Returns null for default scope "AZ"
+ * 2. Returns empty map for default scope "Namespaced" * * @param universeParams * @return Generated namespaced NS scope services in the form <Namespace, <AZ_uuid, @@ -934,6 +940,9 @@ private static Map> getServicesFromOverrides( throws IOException { Map>> namespaceAZOverrides = generateNamespaceAZOverridesMap(universeParams, clusterType); + if (namespaceAZOverrides == null) { + return null; + } Map>>> nsNamespacedServices = new HashMap<>(); String defaultScopeUserIntent = universeParams.getPrimaryCluster().userIntent.defaultServiceScopeAZ ? "AZ" : "Namespaced"; @@ -945,8 +954,13 @@ private static Map> getServicesFromOverrides( azOverridesEntry -> { Map override = azOverridesEntry.getValue(); Map> services = getServicesFromOverrides(override); - if (services == null) { + if (services == null + || (MapUtils.isEmpty(services) && defaultScopeUserIntent.equals("AZ"))) { azNamespacedServicesMap.put(azOverridesEntry.getKey(), null /* empty Services */); + } else if (MapUtils.isEmpty(services) + && defaultScopeUserIntent.equals("Namespaced")) { + azNamespacedServicesMap.put( + azOverridesEntry.getKey(), new HashMap>()); } else { Map> namespacedServices = services.entrySet().stream() @@ -967,11 +981,16 @@ private static Map> getServicesFromOverrides( return scope.equals("Namespaced"); }) .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + // Set null if using default helm overrides and userIntent service scope is "AZ" + if (MapUtils.isEmpty(namespacedServices)) { + namespacedServices = null; + } azNamespacedServicesMap.put(azOverridesEntry.getKey(), namespacedServices); } }); nsNamespacedServices.put(namespace, azNamespacedServicesMap); } + log.debug("Namespaced services: {}", nsNamespacedServices); return nsNamespacedServices; } @@ -1224,9 +1243,20 @@ private static void validateConflictingService(UniverseDefinitionTaskParams univ public static void validateServiceEndpoints( UniverseDefinitionTaskParams universeParams, Map universeConfig) throws 
IOException { - // Return if should not configure if (!shouldConfigureNamespacedService(universeParams, universeConfig)) { - log.debug("Universe configuration does not support Namespace scoped services, skipping"); + Map>>> nsScopedServices = + getNamespaceNSScopedServices(universeParams, null /* clusterType */); + if (nsScopedServices == null) { + return; + } + boolean nsServicePresent = + nsScopedServices.values().stream() + .flatMap(azsMap -> azsMap.values().stream()) + .anyMatch(Objects::nonNull); + if (nsServicePresent) { + throw new RuntimeException( + "Universe configuration does not support Namespace scoped services"); + } return; } // Validate service name does not appear twice in final overrides per AZ. @@ -1250,11 +1280,6 @@ public static void validateUpgradeServiceEndpoints( UniverseDefinitionTaskParams universeParams, Map universeConfig) throws IOException { - // Return if should not configure - if (!shouldConfigureNamespacedService(universeParams, universeConfig)) { - log.debug("Universe configuration does not support Namespace scoped services, skipping"); - return; - } UniverseDefinitionTaskParams taskParams = Json.fromJson(Json.toJson(universeParams), UniverseDefinitionTaskParams.class); taskParams.getPrimaryCluster().userIntent.universeOverrides = newUniverseOverrides; @@ -1262,6 +1287,11 @@ public static void validateUpgradeServiceEndpoints( // Validate new overrides for service endpoint independently validateServiceEndpoints(taskParams, universeConfig); + // Return if not supported + if (!shouldConfigureNamespacedService(taskParams, universeConfig)) { + return; + } + String defaultScope = universeParams.getPrimaryCluster().userIntent.defaultServiceScopeAZ ? 
"AZ" : "Namespaced"; diff --git a/managed/src/main/java/com/yugabyte/yw/controllers/handlers/UniverseCRUDHandler.java b/managed/src/main/java/com/yugabyte/yw/controllers/handlers/UniverseCRUDHandler.java index fa970e421141..9a2aa3793c25 100644 --- a/managed/src/main/java/com/yugabyte/yw/controllers/handlers/UniverseCRUDHandler.java +++ b/managed/src/main/java/com/yugabyte/yw/controllers/handlers/UniverseCRUDHandler.java @@ -941,12 +941,12 @@ public UniverseResp createUniverse(Customer customer, UniverseDefinitionTaskPara // Default service scope should be 'Namespaced' primaryIntent.defaultServiceScopeAZ = false; } - // Validate service endpoints - try { - KubernetesUtil.validateServiceEndpoints(taskParams, universe.getConfig()); - } catch (IOException e) { - throw new RuntimeException("Failed to parse Kubernetes overrides!", e.getCause()); - } + } + // Validate service endpoints + try { + KubernetesUtil.validateServiceEndpoints(taskParams, universe.getConfig()); + } catch (IOException e) { + throw new RuntimeException("Failed to parse Kubernetes overrides!", e.getCause()); } } else { if (primaryCluster.userIntent.enableIPV6) { diff --git a/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/EditKubernetesUniverseTest.java b/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/EditKubernetesUniverseTest.java index 7760948cfc3d..0a7d5cff8ba3 100644 --- a/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/EditKubernetesUniverseTest.java +++ b/managed/src/test/java/com/yugabyte/yw/commissioner/tasks/EditKubernetesUniverseTest.java @@ -214,12 +214,12 @@ private void setupUniverseMultiAZ(boolean setMasters, int numTservers) { ImmutableList.of( TaskType.CheckLeaderlessTablets, TaskType.FreezeUniverse, - TaskType.HandleKubernetesNamespacedServices, TaskType.KubernetesCommandExecutor, TaskType.KubernetesCheckNumPod, TaskType.KubernetesCommandExecutor, TaskType.WaitForServer, TaskType.UpdatePlacementInfo, + TaskType.HandleKubernetesNamespacedServices, 
TaskType.KubernetesCommandExecutor, TaskType.InstallingThirdPartySoftware, TaskType.UpdateUniverseIntent, @@ -228,7 +228,6 @@ private void setupUniverseMultiAZ(boolean setMasters, int numTservers) { private List getExpectedAddPodTaskResults() { return ImmutableList.of( - Json.toJson(ImmutableMap.of()), Json.toJson(ImmutableMap.of()), Json.toJson(ImmutableMap.of()), Json.toJson(ImmutableMap.of("commandType", HELM_UPGRADE.name())), @@ -236,6 +235,7 @@ private List getExpectedAddPodTaskResults() { Json.toJson(ImmutableMap.of("commandType", POD_INFO.name())), Json.toJson(ImmutableMap.of()), Json.toJson(ImmutableMap.of()), + Json.toJson(ImmutableMap.of()), Json.toJson(ImmutableMap.of("commandType", POD_INFO.name())), Json.toJson(ImmutableMap.of()), Json.toJson(ImmutableMap.of()), @@ -247,9 +247,9 @@ private List getExpectedAddPodTaskResults() { ImmutableList.of( TaskType.CheckLeaderlessTablets, TaskType.FreezeUniverse, - TaskType.HandleKubernetesNamespacedServices, TaskType.UpdatePlacementInfo, TaskType.WaitForDataMove, + TaskType.HandleKubernetesNamespacedServices, TaskType.CheckNodeSafeToDelete, TaskType.KubernetesCommandExecutor, TaskType.KubernetesCheckNumPod, @@ -284,8 +284,8 @@ private List getExpectedRemovePodTaskResults() { ImmutableList.of( TaskType.CheckLeaderlessTablets, TaskType.FreezeUniverse, - TaskType.HandleKubernetesNamespacedServices, TaskType.UpdatePlacementInfo, + TaskType.HandleKubernetesNamespacedServices, TaskType.CheckUnderReplicatedTablets, TaskType.CheckNodesAreSafeToTakeDown, TaskType.KubernetesCommandExecutor, diff --git a/managed/src/test/java/com/yugabyte/yw/controllers/UniverseCreateControllerTestBase.java b/managed/src/test/java/com/yugabyte/yw/controllers/UniverseCreateControllerTestBase.java index 77ba88112073..331b8ddb8e1a 100644 --- a/managed/src/test/java/com/yugabyte/yw/controllers/UniverseCreateControllerTestBase.java +++ b/managed/src/test/java/com/yugabyte/yw/controllers/UniverseCreateControllerTestBase.java @@ -780,6 +780,7 @@ 
public void testUniverseCreateDeviceInfoValidation( node.placementUuid = cluster.uuid; bodyJson.set("clusters", Json.newArray().add(Json.toJson(cluster))); bodyJson.set("nodeDetailsSet", Json.newArray().add(Json.toJson(node))); + bodyJson.put("nodePrefix", "demo-node"); if (errorMessage == null) { Result result = sendCreateRequest(bodyJson); From f957dda848d87c0d0f7ee84c02f790742cb2f4cc Mon Sep 17 00:00:00 2001 From: Piyush Jain Date: Tue, 17 Sep 2024 22:24:24 -0700 Subject: [PATCH 71/75] [Docs] Minor fixes to docs pages around transactions (#23926) 1. Remove remaining usage of the words "optimistic" and "pessimistic" for concurrency control modes in distributed-transactions-ysql.md. 2. Fix information around serialization errors and deadlock errors in transactions-errorcodes-ysql.md and transactions-retries-ysql.md. 3. Replace usage of ysql_max_write_restart_attempts with yb_max_query_layer_retries in concurrency-control.md. --------- Co-authored-by: Dwight Hodge --- .../transactions/concurrency-control.md | 8 ++--- .../transactions-errorcodes-ysql.md | 14 ++++++++- .../transactions/transactions-retries-ysql.md | 29 ++++++++++--------- .../distributed-transactions-ysql.md | 4 +-- .../transactions/concurrency-control.md | 8 ++--- .../transactions-errorcodes-ysql.md | 14 ++++++++- .../transactions/transactions-retries-ysql.md | 29 ++++++++++--------- .../distributed-transactions-ysql.md | 4 +-- .../transactions/transactions-retries-ysql.md | 2 +- .../transactions/transactions-retries-ysql.md | 2 +- 10 files changed, 72 insertions(+), 42 deletions(-) diff --git a/docs/content/preview/architecture/transactions/concurrency-control.md b/docs/content/preview/architecture/transactions/concurrency-control.md index 8f4a089fa5c4..b7e29767ab2a 100644 --- a/docs/content/preview/architecture/transactions/concurrency-control.md +++ b/docs/content/preview/architecture/transactions/concurrency-control.md @@ -329,12 +329,12 @@ After a transaction T1 (that was waiting for other 
transactions) unblocks, it co ### Examples -The following examples describe different use cases detailing the Wait-on-Conflict behavior. +The following examples describe different use cases detailing the Wait-on-Conflict behavior. To run the examples, you need to do the following: -1. Note that the examples require you to set the YB-TServer flag `enable_wait_queues=true`. -1. Also, set the YB-TServer flag `ysql_max_write_restart_attempts=0` to disable internal query layer retries on conflict. This is done to illustrate the `Wait-on-Conflict` concurrency control semantics separately without query layer retries. It is not recommended to disable these retries in production. +1. Set the YB-TServer flag `enable_wait_queues=true`. +1. Set the per-session `yb_max_query_layer_retries=0` YSQL configuration parameter to disable internal query layer retries on conflict. This is done to illustrate the `Wait-on-Conflict` concurrency control semantics separately without query layer retries. It is not recommended to disable these retries in production. To set it at the cluster level, use the `ysql_pg_conf_csv` YB-TServer flag. -A restart is necessary for these flags to take effect. +A restart is necessary for the flags to take effect. Start by setting up the table you'll use in all of the examples in this section. diff --git a/docs/content/preview/develop/learn/transactions/transactions-errorcodes-ysql.md b/docs/content/preview/develop/learn/transactions/transactions-errorcodes-ysql.md index f289f628a7ad..0d9d684668de 100644 --- a/docs/content/preview/develop/learn/transactions/transactions-errorcodes-ysql.md +++ b/docs/content/preview/develop/learn/transactions/transactions-errorcodes-ysql.md @@ -82,13 +82,25 @@ The client can reconnect to the server and retry the transaction. This error occurs when a transaction cannot be applied or progress further because of other conflicting transactions. For example, when multiple transactions are modifying the same key. 
```output -ERROR: 40001: Operation expired: Transaction XXXX expired or aborted by a conflict +ERROR: could not serialize access due to concurrent update (...) ``` {{}} Serialization failure errors can be retried by the client. See [Client-side retry](../transactions-retries-ysql/#client-side-retry). {{}} +## 40P01: Deadlock detected + +This error occurs when two or more transactions wait on each other to form a deadlock cycle. One or more of the transactions in the cycle are aborted and they fail with the following error. + +```output +ERROR: deadlock detected (...) +``` + +{{}} +Deadlock detected errors can be retried by the client. See [Client-side retry](../transactions-retries-ysql/#client-side-retry). +{{}} + ## 2D000: Invalid transaction termination This error occurs when a transaction is terminated either by a `COMMIT` or a `ROLLBACK` in an invalid location. For example, when a `COMMIT` is issued inside a stored procedure that is called from inside a transaction. diff --git a/docs/content/preview/develop/learn/transactions/transactions-retries-ysql.md b/docs/content/preview/develop/learn/transactions/transactions-retries-ysql.md index 53c30f56afe2..0c44dd3c4a1d 100644 --- a/docs/content/preview/develop/learn/transactions/transactions-retries-ysql.md +++ b/docs/content/preview/develop/learn/transactions/transactions-retries-ysql.md @@ -39,7 +39,7 @@ Follow the [setup instructions](../../../../explore#tabs-00-00) to start a singl ## Automatic retries -YugabyteDB retries failed transactions automatically on the server side whenever possible without client intervention as per the [concurrency control policies](../../../../architecture/transactions/concurrency-control/#best-effort-internal-retries-for-first-statement-in-a-transaction). This is the case even for single statements, which are implicitly considered transactions. 
In [Read Committed](../../../../explore/transactions/isolation-levels/#read-committed-isolation) isolation mode, the server retries indefinitely. +YugabyteDB retries failed transactions automatically on the server side whenever possible without client intervention as per the [concurrency control policies](../../../../architecture/transactions/concurrency-control/#best-effort-internal-retries-for-first-statement-in-a-transaction). This is the case even for single statements, which are implicitly considered transactions. In some scenarios, a server-side retry is not suitable. For example, the retry limit has been reached or the transaction is not in a valid state. In these cases, it is the client's responsibility to retry the transaction at the application layer. @@ -76,19 +76,11 @@ If the `COMMIT` is successful, the program exits the loop. `attempt < max_attemp ##### 40001 - SerializationFailure -SerializationFailure errors happen when multiple transactions are updating the same set of keys (conflict) or when transactions are waiting on each other (deadlock). The error messages could be one of the following types: +SerializationFailure errors happen when multiple transactions are updating the same set of keys (conflict). During a conflict, certain transactions are retried. -- During a conflict, certain transactions are retried. However, after the retry limit is reached, an error occurs as follows: - - ```output - ERROR: 40001: All transparent retries exhausted. - ``` - -- All transactions are given a dynamic priority. When a deadlock is detected, the transaction with lower priority is automatically killed. For this scenario, the client might receive a message similar to the following: - - ```output - ERROR: 40001: Operation expired: Heartbeat: Transaction XXXX expired or aborted by a conflict - ``` +```output +ERROR: could not serialize access due to concurrent update (...) 
+``` The correct way to handle this error is with a retry loop with exponential backoff, as described in [Client-side retry](#client-side-retry). When the [UPDATE](../../../../api/ysql/the-sql-language/statements/dml_update/) or [COMMIT](../../../../api/ysql/the-sql-language/statements/txn_commit/) fails because of `SerializationFailure`, the code retries after waiting for `sleep_time` seconds, up to `max_attempts`. @@ -98,6 +90,17 @@ In read committed isolation level, as the server retries internally, the client Another way to handle these failures is would be to rollback to a checkpoint before the failed statement and proceed further as described in [Savepoints](#savepoints). +##### 40001 - Deadlock detected + +This error occurs when two or more transactions wait on each other to form a deadlock cycle. One or more of the transactions in the cycle are aborted +and they fail with the following error: + +```output +ERROR: deadlock detected (...) +``` + +Retries to handle this error are similar to serialization errors (40001). + ## Savepoints [Savepoints](../../../../api/ysql/the-sql-language/statements/savepoint_create/) are named checkpoints that can be used to rollback just a few statements, and then proceed with the transaction, rather than aborting the entire transaction when there is an error. diff --git a/docs/content/preview/explore/transactions/distributed-transactions-ysql.md b/docs/content/preview/explore/transactions/distributed-transactions-ysql.md index b55b29dea9d2..255561d0f581 100644 --- a/docs/content/preview/explore/transactions/distributed-transactions-ysql.md +++ b/docs/content/preview/explore/transactions/distributed-transactions-ysql.md @@ -156,9 +156,9 @@ Each update performed as a part of the transaction is replicated across multiple ### Concurrency control -[Concurrency control](../../../architecture/transactions/concurrency-control/) in databases ensures that multiple transactions can execute concurrently while preserving data integrity. 
Concurrency control is essential for correctness in environments where two or more transactions can access the same data at the same time. The two primary mechanisms to achieve concurrency control are optimistic and pessimistic. +[Concurrency control](../../../architecture/transactions/concurrency-control/) in databases ensures that multiple transactions can execute concurrently while preserving data integrity. Concurrency control is essential for correctness in environments where two or more transactions can access the same data at the same time. YugabyteDB currently supports two concurrency control mechanisms: Wait-on-Conflict and Fail-on-Conflict. -YugabyteDB currently supports optimistic concurrency control, with pessimistic concurrency control being worked on actively. +These are orthogonal to isolation levels (except for Read Committed isolation which has the same behaviour in both modes barring some limitations and performance penalty in Fail-on-Conflict mode). Also note that the Wait-on-Conflict concurrency control mode exactly matches PostgreSQL semantics and also gives better performance. ## Transaction options diff --git a/docs/content/stable/architecture/transactions/concurrency-control.md b/docs/content/stable/architecture/transactions/concurrency-control.md index 7ae2f476e064..e23afc0d8507 100644 --- a/docs/content/stable/architecture/transactions/concurrency-control.md +++ b/docs/content/stable/architecture/transactions/concurrency-control.md @@ -329,12 +329,12 @@ After a transaction T1 (that was waiting for other transactions) unblocks, it co ### Examples -The following examples describe different use cases detailing the Wait-on-Conflict behavior. +The following examples describe different use cases detailing the Wait-on-Conflict behavior. To run the examples, you need to do the following: -1. Note that the examples require you to set the YB-TServer flag `enable_wait_queues=true`. -1. 
Also, set the YB-TServer flag `ysql_max_write_restart_attempts=0` to disable internal query layer retries on conflict. This is done to illustrate the `Wait-on-Conflict` concurrency control semantics separately without query layer retries. It is not recommended to disable these retries in production. +1. Set the YB-TServer flag `enable_wait_queues=true`. +1. Set the per-session `yb_max_query_layer_retries=0` YSQL configuration parameter to disable internal query layer retries on conflict. This is done to illustrate the `Wait-on-Conflict` concurrency control semantics separately without query layer retries. It is not recommended to disable these retries in production. To set it at the cluster level, use the `ysql_pg_conf_csv` YB-TServer flag. -A restart is necessary for these flags to take effect. +A restart is necessary for the flags to take effect. Start by setting up the table you'll use in all of the examples in this section. diff --git a/docs/content/stable/develop/learn/transactions/transactions-errorcodes-ysql.md b/docs/content/stable/develop/learn/transactions/transactions-errorcodes-ysql.md index d1b3d5154832..fd92692472c1 100644 --- a/docs/content/stable/develop/learn/transactions/transactions-errorcodes-ysql.md +++ b/docs/content/stable/develop/learn/transactions/transactions-errorcodes-ysql.md @@ -80,13 +80,25 @@ The client can reconnect to the server and retry the transaction. This error occurs when a transaction cannot be applied or progress further because of other conflicting transactions. For example, when multiple transactions are modifying the same key. ```output -ERROR: 40001: Operation expired: Transaction XXXX expired or aborted by a conflict +ERROR: could not serialize access due to concurrent update (...) ``` {{}} Serialization failure errors can be retried by the client. See [Client-side retry](../transactions-retries-ysql/#client-side-retry). 
{{}} +## 40P01: Deadlock detected + +This error occurs when two or more transactions wait on each other to form a deadlock cycle. One or more of the transactions in the cycle are aborted and they fail with the following error. + +```output +ERROR: deadlock detected (...) +``` + +{{}} +Deadlock detected errors can be retried by the client. See [Client-side retry](../transactions-retries-ysql/#client-side-retry). +{{}} + ## 2D000: Invalid transaction termination This error occurs when a transaction is terminated either by a `COMMIT` or a `ROLLBACK` in an invalid location. For example, when a `COMMIT` is issued inside a stored procedure that is called from inside a transaction. diff --git a/docs/content/stable/develop/learn/transactions/transactions-retries-ysql.md b/docs/content/stable/develop/learn/transactions/transactions-retries-ysql.md index 02381d8a8968..99c75fbd2266 100644 --- a/docs/content/stable/develop/learn/transactions/transactions-retries-ysql.md +++ b/docs/content/stable/develop/learn/transactions/transactions-retries-ysql.md @@ -37,7 +37,7 @@ Follow the [setup instructions](../../../../explore#tabs-00-00) to start a singl ## Automatic retries -YugabyteDB retries failed transactions automatically on the server side whenever possible without client intervention as per the [concurrency control policies](../../../../architecture/transactions/concurrency-control/#best-effort-internal-retries-for-first-statement-in-a-transaction). This is the case even for single statements, which are implicitly considered transactions. In [Read Committed](../../../../explore/transactions/isolation-levels/#read-committed-isolation) isolation mode, the server retries indefinitely. +YugabyteDB retries failed transactions automatically on the server side whenever possible without client intervention as per the [concurrency control policies](../../../../architecture/transactions/concurrency-control/#best-effort-internal-retries-for-first-statement-in-a-transaction). 
This is the case even for single statements, which are implicitly considered transactions. In some scenarios, a server-side retry is not suitable. For example, the retry limit has been reached or the transaction is not in a valid state. In these cases, it is the client's responsibility to retry the transaction at the application layer. @@ -74,19 +74,11 @@ If the `COMMIT` is successful, the program exits the loop. `attempt < max_attemp ##### 40001 - SerializationFailure -SerializationFailure errors happen when multiple transactions are updating the same set of keys (conflict) or when transactions are waiting on each other (deadlock). The error messages could be one of the following types: +SerializationFailure errors happen when multiple transactions are updating the same set of keys (conflict). During a conflict, certain transactions are retried. -- During a conflict, certain transactions are retried. However, after the retry limit is reached, an error occurs as follows: - - ```output - ERROR: 40001: All transparent retries exhausted. - ``` - -- All transactions are given a dynamic priority. When a deadlock is detected, the transaction with lower priority is automatically killed. For this scenario, the client might receive a message similar to the following: - - ```output - ERROR: 40001: Operation expired: Heartbeat: Transaction XXXX expired or aborted by a conflict - ``` +```output +ERROR: could not serialize access due to concurrent update (...) +``` The correct way to handle this error is with a retry loop with exponential backoff, as described in [Client-side retry](#client-side-retry). When the [UPDATE](../../../../api/ysql/the-sql-language/statements/dml_update/) or [COMMIT](../../../../api/ysql/the-sql-language/statements/txn_commit/) fails because of `SerializationFailure`, the code retries after waiting for `sleep_time` seconds, up to `max_attempts`. 
@@ -96,6 +88,17 @@ In read committed isolation level, as the server retries internally, the client Another way to handle these failures is would be to rollback to a checkpoint before the failed statement and proceed further as described in [Savepoints](#savepoints). +##### 40001 - Deadlock detected + +This error occurs when two or more transactions wait on each other to form a deadlock cycle. One or more of the transactions in the cycle are aborted +and they fail with the following error: + +```output +ERROR: deadlock detected (...) +``` + +Retries to handle this error are similar to serialization errors (40001). + ## Savepoints [Savepoints](../../../../api/ysql/the-sql-language/statements/savepoint_create/) are named checkpoints that can be used to rollback just a few statements, and then proceed with the transaction, rather than aborting the entire transaction when there is an error. diff --git a/docs/content/stable/explore/transactions/distributed-transactions-ysql.md b/docs/content/stable/explore/transactions/distributed-transactions-ysql.md index b7cf352c7085..1ba3e8fd6576 100644 --- a/docs/content/stable/explore/transactions/distributed-transactions-ysql.md +++ b/docs/content/stable/explore/transactions/distributed-transactions-ysql.md @@ -156,9 +156,9 @@ Each update performed as a part of the transaction is replicated across multiple ### Concurrency control -[Concurrency control](../../../architecture/transactions/concurrency-control/) in databases ensures that multiple transactions can execute concurrently while preserving data integrity. Concurrency control is essential for correctness in environments where two or more transactions can access the same data at the same time. The two primary mechanisms to achieve concurrency control are optimistic and pessimistic. +[Concurrency control](../../../architecture/transactions/concurrency-control/) in databases ensures that multiple transactions can execute concurrently while preserving data integrity. 
Concurrency control is essential for correctness in environments where two or more transactions can access the same data at the same time. YugabyteDB currently supports two concurrency control mechanisms: Wait-on-Conflict and Fail-on-Conflict. -YugabyteDB currently supports optimistic concurrency control, with pessimistic concurrency control being worked on actively. +These are orthogonal to isolation levels (except for Read Committed isolation which has the same behaviour in both modes barring some limitations and performance penalty in Fail-on-Conflict mode). Also note that the Wait-on-Conflict concurrency control mode exactly matches PostgreSQL semantics and also gives better performance. ## Transaction options diff --git a/docs/content/v2.18/develop/learn/transactions/transactions-retries-ysql.md b/docs/content/v2.18/develop/learn/transactions/transactions-retries-ysql.md index 975999a08f26..ad3aadecb2d3 100644 --- a/docs/content/v2.18/develop/learn/transactions/transactions-retries-ysql.md +++ b/docs/content/v2.18/develop/learn/transactions/transactions-retries-ysql.md @@ -37,7 +37,7 @@ Follow the [setup instructions](../../../../explore#tabs-00-00) to start a singl ## Automatic retries -YugabyteDB retries failed transactions automatically on the server side whenever possible without client intervention as per the [concurrency control policies](../../../../architecture/transactions/concurrency-control/#best-effort-internal-retries-for-first-statement-in-a-transaction). This is the case even for single statements, which are implicitly considered transactions. In [Read Committed](../../../../explore/transactions/isolation-levels/#read-committed-isolation) isolation mode, the server retries indefinitely. 
+YugabyteDB retries failed transactions automatically on the server side whenever possible without client intervention as per the [concurrency control policies](../../../../architecture/transactions/concurrency-control/#best-effort-internal-retries-for-first-statement-in-a-transaction). This is the case even for single statements, which are implicitly considered transactions. In some scenarios, a server-side retry is not suitable. For example, the retry limit has been reached or the transaction is not in a valid state. In these cases, it is the client's responsibility to retry the transaction at the application layer. diff --git a/docs/content/v2.20/develop/learn/transactions/transactions-retries-ysql.md b/docs/content/v2.20/develop/learn/transactions/transactions-retries-ysql.md index 856b018c2052..eb6291a430d9 100644 --- a/docs/content/v2.20/develop/learn/transactions/transactions-retries-ysql.md +++ b/docs/content/v2.20/develop/learn/transactions/transactions-retries-ysql.md @@ -37,7 +37,7 @@ Follow the [setup instructions](../../../../explore#tabs-00-00) to start a singl ## Automatic retries -YugabyteDB retries failed transactions automatically on the server side whenever possible without client intervention as per the [concurrency control policies](../../../../architecture/transactions/concurrency-control/#best-effort-internal-retries-for-first-statement-in-a-transaction). This is the case even for single statements, which are implicitly considered transactions. In [Read Committed](../../../../explore/transactions/isolation-levels/#read-committed-isolation) isolation mode, the server retries indefinitely. +YugabyteDB retries failed transactions automatically on the server side whenever possible without client intervention as per the [concurrency control policies](../../../../architecture/transactions/concurrency-control/#best-effort-internal-retries-for-first-statement-in-a-transaction). 
This is the case even for single statements, which are implicitly considered transactions. In some scenarios, a server-side retry is not suitable. For example, the retry limit has been reached or the transaction is not in a valid state. In these cases, it is the client's responsibility to retry the transaction at the application layer. From e4a854815005a1f0d042f85042734c9f57bf67fa Mon Sep 17 00:00:00 2001 From: Aleksandr Malyshev Date: Wed, 18 Sep 2024 00:10:33 +0300 Subject: [PATCH 72/75] [PLAT-15326] Proper error handling in DDL atomicity check Summary: Currently various errors are not handled properly in ddl atomicity check. Fixing it. Test Plan: Install 2.20.0.0_b50 universe. Make sure DDL atomicity check passes. Force DDL atomicity errors. Force check. Make sure check show valid errors. Stop TServer on master leader node. Make sure check shows 'TServer is not running on this node' error Stop 2 masters. Make sure check shows 'Master Leader HTTP endpoint is not running' error Reviewers: vbansal, skurapati Reviewed By: vbansal Subscribers: yugaware Differential Revision: https://phorge.dev.yugabyte.com/D38142 --- .../resources/health/node_health.py.template | 119 +++++++++++++++--- 1 file changed, 99 insertions(+), 20 deletions(-) diff --git a/managed/src/main/resources/health/node_health.py.template b/managed/src/main/resources/health/node_health.py.template index eb290383c608..29531fc7aabb 100755 --- a/managed/src/main/resources/health/node_health.py.template +++ b/managed/src/main/resources/health/node_health.py.template @@ -513,7 +513,7 @@ def wrap_command(command_str): return "timeout {} bash -c 'set -o pipefail; {}'".format(CMD_TIMEOUT_SEC, command_str) -def has_errors(str): +def check_for_errors(str): return str is not None and str.startswith('Error') @@ -628,7 +628,7 @@ class NodeChecker(): found_error = False output = self.get_disk_utilization() msgs = [] - if has_errors(output): + if check_for_errors(output): return e.fill_and_return_entry([output], 
has_error=True) # Do not process the headers. @@ -706,7 +706,7 @@ class NodeChecker(): return e.fill_and_return_warning_entry(["OpenSSL is not installed, skipped"]) output = self.get_certificate_expiration_date(cert_path) - if has_errors(output): + if check_for_errors(output): return e.fill_and_return_entry([output], has_error=True) if output == 'File not found': @@ -868,7 +868,7 @@ class NodeChecker(): cmd = 'curl -s -L --insecure {}'.format(endpoint) output = self._check_output(cmd).strip() - if has_errors(output): + if check_for_errors(output): logging.info("HTTP request to {} returned error: {}".format(endpoint, output)) return None return output @@ -952,7 +952,7 @@ class NodeChecker(): '-mmin -{}'.format(FATAL_TIME_THRESHOLD_MINUTES), log_severity) output = self._check_output(cmd) - if has_errors(output): + if check_for_errors(output): return e.fill_and_return_entry([output], has_error=True) log_files = self.check_logs_find_output(output) @@ -987,7 +987,7 @@ class NodeChecker(): yb_cores_dir, yb_cores_dir, '-mmin -{}'.format(FATAL_TIME_THRESHOLD_MINUTES)) output = self._check_output(cmd) - if has_errors(output): + if check_for_errors(output): return e.fill_and_return_entry([output], has_error=True) files = [] @@ -1056,11 +1056,11 @@ class NodeChecker(): else: e = self._new_metric_entry("Uptime", process) uptime = self.get_uptime_for_process(process) - if has_errors(uptime): + if check_for_errors(uptime): return e.fill_and_return_entry([uptime], has_error=True) boot_time = self.get_boot_time_for_process(process) - if has_errors(boot_time): + if check_for_errors(boot_time): return e.fill_and_return_entry([boot_time], has_error=True) if uptime is None or not uptime: @@ -1275,7 +1275,7 @@ class NodeChecker(): logging.info("Checking node exporter on node {}".format(self.node)) e = self._new_metric_entry("Node exporter") output = self.get_command_for_process("node_exporter") - if has_errors(output): + if check_for_errors(output): return 
e.fill_and_return_entry([output], has_error=True) metric = Metric.from_definition(YB_NODE_CUSTOM_NODE_METRICS) running = ('node_exporter' in output) @@ -1313,7 +1313,7 @@ class NodeChecker(): awk '\"'\"'{print $1}'\"'\"' /proc/sys/fs/file-nr" output = self._check_output(cmd) - if has_errors(output): + if check_for_errors(output): return e.fill_and_return_entry([output], has_error=True) counts = output.split('\n') @@ -1584,6 +1584,13 @@ class NodeChecker(): e = self._new_entry("DDL atomicity") metric = Metric.from_definition(YB_DDL_ATOMICITY_CHECK) + tserver_pid = self.get_process_pid_by_name(TSERVER) + if tserver_pid is None: + metric.add_value(0) + return e.fill_and_return_entry(["TServer is not running on this node"], + has_error=True, + metrics=[metric]) + try: ysqlsh_cmd = self.create_ysqlsh_command("") except RuntimeError as re: @@ -1593,8 +1600,23 @@ class NodeChecker(): errors = [] try: # Get table data - tables_output = (json.loads(self.http_request( - "{}/api/v1/tables".format(self.master_leader_url)))) + tables_response = self.http_request( + "{}/api/v1/tables".format(self.master_leader_url)) + if not tables_response: + metric.add_value(0) + return e.fill_and_return_entry( + ['Master Leader HTTP endpoint is not running'], + has_error=True, + metrics=[metric]) + try: + tables_output = json.loads(tables_response) + except Exception as ex: + logging.warning("Tables HTTP API response is not a valid json: %s", tables_response) + metric.add_value(0) + return e.fill_and_return_entry( + ['Tables HTTP API response is not a valid json'], + has_error=True, + metrics=[metric]) table_data_json = tables_output["user"] table_data_json += tables_output["index"] @@ -1625,10 +1647,27 @@ class NodeChecker(): (SELECT relname, oid, relfilenode FROM pg_class WHERE oid >= 16384) t;") # Fetch all user tables from pg_class for the database - pg_class_output = json.loads(self._check_output(pg_class_cmd).strip()) + pg_class_output = self._check_output(pg_class_cmd).strip() + + if 
check_for_errors(pg_class_output): + metric.add_value(0) + return e.fill_and_return_entry( + ["Failed to retrieve pg_class info: {}".format(pg_class_output)], + has_error=True, + metrics=[metric]) + try: + pg_class_json = json.loads(pg_class_output) + except Exception as ex: + logging.warning("pg_class query returned invalid json: %s", + pg_class_output) + metric.add_value(0) + return e.fill_and_return_entry( + ['pg_class query returned invalid json'], + has_error=True, + metrics=[metric]) pg_class_oid_tableinfo_dict = {} # Use relfilenode if it exists (as the table may be rewritten) - for table in pg_class_output: + for table in pg_class_json: if table['relfilenode'] != '0': pg_class_oid_tableinfo_dict[table['relfilenode']] = table else: @@ -1637,9 +1676,28 @@ class NodeChecker(): pg_attribute_cmd = "{}{} -t -c \"{}\"".format(ysqlsh_cmd, dbname, "SELECT json_agg(row_to_json(t)) FROM \ (SELECT attname, attrelid FROM pg_attribute WHERE attrelid >= 16384) t;") - pg_attribute_output = json.loads(self._check_output(pg_attribute_cmd).strip()) + + pg_attribute_output = self._check_output(pg_attribute_cmd).strip() + + if check_for_errors(pg_attribute_output): + metric.add_value(0) + return e.fill_and_return_entry( + ["Failed to retrieve pg_attribute info: {}".format(pg_attribute_output)], + has_error=True, + metrics=[metric]) + + try: + pg_attribute_json = json.loads(pg_attribute_output) + except Exception as ex: + logging.warning("pg_attribute query returned invalid json: %s", + pg_attribute_output) + metric.add_value(0) + return e.fill_and_return_entry( + ['pg_attribute query returned invalid json'], + has_error=True, + metrics=[metric]) pg_attribute_attrelid_attnames_dict = defaultdict(list) - for attribute in pg_attribute_output: + for attribute in pg_attribute_json: (pg_attribute_attrelid_attnames_dict[attribute['attrelid']] .append(attribute['attname'])) @@ -1667,8 +1725,25 @@ class NodeChecker(): continue # Get columns - table_schema_json = 
json.loads(self.http_request( - "{}/api/v1/table?id={}".format(self.master_leader_url, tableid))) + table_schema_response = self.http_request( + "{}/api/v1/table?id={}".format(self.master_leader_url, tableid)) + if not table_schema_response: + metric.add_value(0) + return e.fill_and_return_entry( + ['Master Leader HTTP endpoint is not running'], + has_error=True, + metrics=[metric]) + + try: + table_schema_json = json.loads(table_schema_response) + except Exception as ex: + logging.warning("Table HTTP API response is not a valid json: %s", + table_schema_response) + metric.add_value(0) + return e.fill_and_return_entry( + ['Table HTTP API response is not a valid json'], + has_error=True, + metrics=[metric]) columns = [html.unescape( column['column']) for column in table_schema_json["columns"]] # Check if each column exists in pg_attribute @@ -1682,8 +1757,12 @@ class NodeChecker(): .format(column, tablename, dbname)) continue except Exception as ex: + logging.exception('Got exception on while performing DDL Atomicity check') metric.add_value(0) - return e.fill_and_return_entry([str(ex)], has_error=True, metrics=[metric]) + return e.fill_and_return_entry( + ["Unexpected error occurred"], + has_error=True, + metrics=[metric]) has_errors = len(errors) > 0 if has_errors: @@ -1702,7 +1781,7 @@ class NodeChecker(): logging.info("OpenSSL installed state for node %s: %s", self.node, output) return {"ssl_installed:" + self.node: (output == "0") - if not has_errors(output) else None} + if not check_for_errors(output) else None} def check_yb_controller_availability(self): controller_cli = '{}/bin/yb-controller-cli'.format(self.yb_controller_dir()) From 10a629ef7416710e55dbb29a71783d2f16d6db78 Mon Sep 17 00:00:00 2001 From: Deepti-yb Date: Wed, 11 Sep 2024 08:07:16 +0000 Subject: [PATCH 73/75] [PLAT-13998][PLAT-15215]Support Image bundle creation and updation in provider requests Summary: Add `--image-bundle` field for users to supply custom AMI images for CSPs For AWS provider 
create: ``` --image-bundle stringArray [Optional] Image bundles associated with AWS provider. Provide the following comma separated fields as key-value pairs: "image-bundle-name=,arch=,ssh-user=,ssh-port=,imdsv2=,default=". Image bundle name, architecture and SSH user are required key-value pairs. The default for SSH Port is 22, IMDSv2 (This should be true if the Image bundle requires Instance Metadata Service v2) is false. Default marks the image bundle as default for the provider. Allowed values for architecture are x86_64 and arm64.Each image bundle can be added using separate --image-bundle flag. Example: --image-bundle =,=,=22 --image-bundle-region-override stringArray [Optional] Image bundle region overrides associated with AWS provider. Provide the following comma separated fields as key-value pairs: "image-bundle-name=,region-name=,machine-image=". All are required key-value pairs. Each --image-bundle definition must have atleast one corresponding --image-bundle-region-override definition for every region added. Each image bundle can be added using separate --image-bundle-region-override flag. Example: --image-bundle =,=,= ``` For AWS provider update: ``` --add-image-bundle stringArray [Optional] Add Image bundles associated with the provider. Provide the following comma separated fields as key-value pairs: "image-bundle-name=,arch=,ssh-user=,ssh-port=,imdsv2=,default=". Image bundle name, architecture and SSH user are required key-value pairs. The default for SSH Port is 22, IMDSv2 (This should be true if the Image bundle requires Instance Metadata Service v2) is false. Default marks the image bundle as default for the provider. Allowed values for architecture are x86_64 and arm64.Each image bundle can be added using separate --image-bundle flag. Example: --add-image-bundle =,=,=22 --add-image-bundle-region-override stringArray [Optional] Add Image bundle region overrides associated with the provider. 
Provide the following comma separated fields as key-value pairs: "image-bundle-name=,region-name=,machine-image=". All are required key-value pairs. Each --image-bundle definition must have atleast one corresponding --image-bundle-region-override definition for every region added. Each override can be added using separate --image-bundle-region-override flag. Example: --add-image-bundle-region-override =,=,= --edit-image-bundle stringArray [Optional] Edit Image bundles associated with the provider. Provide the following comma separated fields as key-value pairs: "image-bundle-uuid=,ssh-user=,ssh-port=,imdsv2=,default=". Image bundle UUID is a required key-value pair.Each image bundle can be edited using separate --image-bundle flag. Example: --edit-image-bundle =,=,=22 --edit-image-bundle-region-override stringArray [Optional] Edit overrides of the region associated with the provider. Provide the following comma separated fields as key-value pairs: "image-bundle-uuid=,region-name=,machine-image=". All are required key-value pairs. Each image bundle can be added using separate --image-bundle-region-override flag. Example: --edit-image-bundle-region-override =,=,= --remove-image-bundle stringArray [Optional] Image bundle UUID to be removed from the provider. Each bundle to be removed needs to be provided using a separate --remove-image-bundle definition. Removing a image bundle removes the corresponding region overrides. ``` For GCP Provider create: ``` --image-bundle stringArray [Optional] Intel x86_64 image bundles associated with GCP provider. Provide the following comma separated fields as key-value pairs: "image-bundle-name=,machine-image=,ssh-user=,ssh-port=,default=". Image bundle name, machine image and SSH user are required key-value pairs. The default SSH Port is 22. Default marks the image bundle as default for the provider. Each image bundle can be added using separate --image-bundle flag. 
Example: --image-bundle =,machine-image=,=,=22 ``` For GCP provider update: ``` --add-image-bundle stringArray [Optional] Add Intel x86_64 image bundles associated with the provider. Provide the following comma separated fields as key-value pairs: "image-bundle-name=,machine-image=,ssh-user=,ssh-port=,default=". Image bundle name, machine image and SSH user are required key-value pairs. The default SSH Port is 22. Default marks the image bundle as default for the provider. Each image bundle can be added using separate --image-bundle flag. Example: --add-image-bundle =,machine-image=,=,=22 --edit-image-bundle stringArray [Optional] Edit Intel x86_64 image bundles associated with the provider. Provide the following comma separated fields as key-value pairs: "image-bundle-uuid=,machine-image=,ssh-user=,ssh-port=,default=". Image bundle UUID is a required key-value pair.Each image bundle can be added using separate --image-bundle flag. Example: --edit-image-bundle =,machine-image=,=,=22 --remove-image-bundle stringArray [Optional] Image bundle UUID to be removed from the provider. Each bundle to be removed needs to be provided using a separate --remove-image-bundle definition. ``` Azure Provider create: ``` -name=,machine-image=,ssh-user=,ssh-port=,default=". Image bundle name, machine image and SSH user are required key-value pairs. The default SSH Port is 22. Default marks the image bundle as default for the provider. Each image bundle can be added using separate --image-bundle flag. Example: --image-bundle =,machine-image=,=,=22 ``` Azure Provider update: ``` --add-image-bundle stringArray [Optional] Add Intel x86_64 image bundles associated with the provider. Provide the following comma separated fields as key-value pairs: "image-bundle-name=,machine-image=,ssh-user=,ssh-port=,default=". Image bundle name, machine image and SSH user are required key-value pairs. The default SSH Port is 22. Default marks the image bundle as default for the provider. 
Each image bundle can be added using separate --image-bundle flag. Example: --add-image-bundle =,machine-image=,=,=22 --edit-image-bundle stringArray [Optional] Edit Intel x86_64 image bundles associated with the provider. Provide the following comma separated fields as key-value pairs: "image-bundle-uuid=,machine-image=,ssh-user=,ssh-port=,default=". Image bundle UUID is a required key-value pair.Each image bundle can be added using separate --image-bundle flag. Example: --edit-image-bundle =,machine-image=,=,=22 --remove-image-bundle stringArray [Optional] Image bundle UUID to be removed from the provider. Each bundle to be removed needs to be provided using a separate --remove-image-bundle definition. ``` > SSH User and SSH Port have been marked as deprecated. It is still supported by the CLI, but a message to use image bundles will be displayed on usage of these fields Test Plan: AWS Provider create ``` ./yba provider aws create -n dkumar-cli \ --region region-name=us-west-2,vpc-id=<>,sg-id=<> \ --zone zone-name=us-west-2a,region-name=us-west-2,subnet=<> \ --zone zone-name=us-west-2b,region-name=us-west-2,subnet=<> \ --access-key-id <> --secret-access-key <> \ --image-bundle image-bundle-name=test-cli,arch=x86_64,ssh-user=ec2-user,ssh-port=22,imdsv2=false,default=false \ --image-bundle-region-override image-bundle-name=test-cli,region-name=us-west-2,machine-image=<>\ --image-bundle-region-override image-bundle-name=test-cli,region-name=us-west-1,machine-image=<> ``` GCP provider create: ``` ./yba provider gcp create -n dkumar-cli \ --region region-name=us-west1,shared-subnet=<> \ --network <> \ --region region-name=us-west2,shared-subnet=<>\ --image-bundle image-bundle-name=test-cli,machine-image=<>,ssh-user=centos ``` Reviewers: svarshney, sneelakantan Reviewed By: svarshney Subscribers: yugaware Differential Revision: https://phorge.dev.yugabyte.com/D37968 --- .../cmd/provider/aws/create_provider.go | 152 ++++++++- .../cmd/provider/aws/update_provider.go | 
307 +++++++++++++++++- .../cmd/provider/azu/create_provider.go | 93 +++++- .../cmd/provider/azu/update_provider.go | 209 +++++++++++- .../cmd/provider/gcp/create_provider.go | 89 ++++- .../cmd/provider/gcp/update_provider.go | 214 +++++++++++- .../providerutil/updateproviderutil.go | 191 ++++++++++- managed/yba-cli/cmd/util/util.go | 14 + .../yba-cli/docs/yba_provider_aws_create.md | 26 +- .../yba-cli/docs/yba_provider_aws_update.md | 35 +- .../yba-cli/docs/yba_provider_azure_create.md | 5 +- .../yba-cli/docs/yba_provider_azure_update.md | 41 +-- .../yba-cli/docs/yba_provider_gcp_create.md | 5 +- .../yba-cli/docs/yba_provider_gcp_update.md | 35 +- managed/yba-cli/internal/formatter/aws/aws.go | 9 +- managed/yba-cli/internal/formatter/azu/azu.go | 9 +- managed/yba-cli/internal/formatter/gcp/gcp.go | 9 +- .../formatter/provider/imagebundle.go | 2 +- 18 files changed, 1308 insertions(+), 137 deletions(-) diff --git a/managed/yba-cli/cmd/provider/aws/create_provider.go b/managed/yba-cli/cmd/provider/aws/create_provider.go index 2dbcf71a9d33..698ac110c6db 100644 --- a/managed/yba-cli/cmd/provider/aws/create_provider.go +++ b/managed/yba-cli/cmd/provider/aws/create_provider.go @@ -7,6 +7,7 @@ package aws import ( "fmt" "os" + "strconv" "strings" "github.com/sirupsen/logrus" @@ -160,11 +161,26 @@ var createAWSProviderCmd = &cobra.Command{ logrus.Fatalf(formatter.Colorize(err.Error()+"\n", formatter.RedColor)) } + awsRegions := buildAWSRegions(regions, zones, allowed, version) + + imageBundles, err := cmd.Flags().GetStringArray("image-bundle") + if err != nil { + logrus.Fatalf(formatter.Colorize(err.Error()+"\n", formatter.RedColor)) + } + + regionOverrides, err := cmd.Flags().GetStringArray("image-bundle-region-override") + if err != nil { + logrus.Fatalf(formatter.Colorize(err.Error()+"\n", formatter.RedColor)) + } + + awsImageBundles := buildAWSImageBundles(imageBundles, regionOverrides, len(awsRegions)) + requestBody := ybaclient.Provider{ Code: 
util.GetStringPointer(providerCode), AllAccessKeys: &allAccessKeys, + ImageBundles: awsImageBundles, Name: util.GetStringPointer(providerName), - Regions: buildAWSRegions(regions, zones, allowed, version), + Regions: awsRegions, Details: &ybaclient.ProviderDetails{ AirGapInstall: util.GetBoolPointer(airgapInstall), SshPort: util.GetInt32Pointer(int32(sshPort)), @@ -218,11 +234,11 @@ func init() { "[Required] Region associated with the AWS provider. Minimum number of required "+ "regions = 1. Provide the following comma separated fields as key-value pairs:"+ "\"region-name=,"+ - "vpc-id=,sg-id=,arch=,yb-image=\". "+ + "vpc-id=,sg-id=\". "+ formatter.Colorize("Region name is required key-value.", formatter.GreenColor)+ - " VPC ID, Security Group ID, YB Image (AMI) and Architecture"+ - " (Default to x86_84) are optional. "+ + " VPC ID and Security Group ID"+ + " are optional. "+ "Each region needs to be added using a separate --region flag. "+ "Example: --region region-name=us-west-2,vpc-id=,sg-id= "+ "--region region-name=us-east-2,vpc-id=,sg-id=") @@ -240,10 +256,49 @@ func init() { "Example: --zone zone-name=us-west-2a,region-name=us-west-2,subnet="+ " --zone zone-name=us-west-2b,region-name=us-west-2,subnet=") + // createAWSProviderCmd.Flags().Bool("add-x86-default-image-bundle", false, + // "[Optional] Include Linux versions that are chosen and managed by"+ + // " YugabyteDB Anywhere in the catalog. (default false)") + // createAWSProviderCmd.Flags().Bool("add-aarch6-default-image-bundle", false, + // "[Optional] Include Linux versions that are chosen and managed by"+ + // " YugabyteDB Anywhere in the catalog. (default false)") + + createAWSProviderCmd.Flags().StringArray("image-bundle", []string{}, + "[Optional] Image bundles associated with AWS provider. "+ + "Provide the following comma separated fields as key-value pairs: "+ + "\"image-bundle-name=,arch=,"+ + "ssh-user=,ssh-port=,imdsv2=,default=\". 
"+ + formatter.Colorize( + "Image bundle name, architecture and SSH user are required key-value pairs.", + formatter.GreenColor)+ + " The default for SSH Port is 22, IMDSv2 ("+ + "This should be true if the Image bundle requires Instance Metadata Service v2)"+ + " is false. Default marks the image bundle as default for the provider. "+ + "Allowed values for architecture are x86_64 and arm64."+ + "Each image bundle can be added using separate --image-bundle flag. "+ + "Example: --image-bundle =,"+ + "=,=22") + createAWSProviderCmd.Flags().StringArray("image-bundle-region-override", []string{}, + "[Optional] Image bundle region overrides associated with AWS provider. "+ + "Provide the following comma separated fields as key-value pairs: "+ + "\"image-bundle-name=,region-name=,"+ + "machine-image=\". "+ + formatter.Colorize( + "All are required key-value pairs.", + formatter.GreenColor)+" Each --image-bundle definition "+ + "must have atleast one corresponding --image-bundle-region-override "+ + "definition for every region added."+ + " Each image bundle can be added using separate --image-bundle-region-override flag. "+ + "Example: --image-bundle =,"+ + "=,=") + createAWSProviderCmd.Flags().String("ssh-user", "ec2-user", "[Optional] SSH User to access the YugabyteDB nodes.") createAWSProviderCmd.Flags().Int("ssh-port", 22, "[Optional] SSH Port to access the YugabyteDB nodes.") + createAWSProviderCmd.Flags().MarkDeprecated("ssh-port", "Use --image-bundle instead.") + createAWSProviderCmd.Flags().MarkDeprecated("ssh-user", "Use --image-bundle instead.") + createAWSProviderCmd.Flags().String("custom-ssh-keypair-name", "", "[Optional] Provide custom key pair name to access YugabyteDB nodes. 
"+ "If left empty, "+ @@ -283,8 +338,6 @@ func buildAWSRegions(regionStrings, zoneStrings []string, allowed bool, Details: &ybaclient.RegionDetails{ CloudInfo: &ybaclient.RegionCloudInfo{ Aws: &ybaclient.AWSRegionCloudInfo{ - YbImage: util.GetStringPointer(region["yb-image"]), - Arch: util.GetStringPointer(region["arch"]), SecurityGroupId: util.GetStringPointer(region["sg-id"]), Vnet: util.GetStringPointer(region["vpc-id"]), }, @@ -328,3 +381,90 @@ func buildAWSZones(zoneStrings []string, regionName string) (res []ybaclient.Ava } return res } + +func buildAWSImageBundles( + imageBundles, regionOverrides []string, + numberOfRegions int) []ybaclient.ImageBundle { + res := make([]ybaclient.ImageBundle, 0) + for _, i := range imageBundles { + bundle := providerutil.BuildImageBundleMapFromString(i, "add") + bundle = providerutil.DefaultImageBundleValues(bundle) + + if _, ok := bundle["ssh-user"]; !ok { + logrus.Fatalln( + formatter.Colorize( + "SSH User not specified in image bundle.\n", + formatter.RedColor)) + } + + regionOverrides := buildAWSImageBundleRegionOverrides(regionOverrides, bundle["name"]) + + if len(regionOverrides) < numberOfRegions { + logrus.Fatalf(formatter.Colorize( + "Machine Image must be provided for every region added.\n", + formatter.RedColor, + )) + } + + sshPort, err := strconv.ParseInt(bundle["ssh-port"], 10, 64) + if err != nil { + errMessage := err.Error() + " Using SSH Port as 22\n" + logrus.Errorln( + formatter.Colorize(errMessage, formatter.YellowColor), + ) + sshPort = 22 + } + + defaultBundle, err := strconv.ParseBool(bundle["default"]) + if err != nil { + errMessage := err.Error() + " Setting default as false\n" + logrus.Errorln( + formatter.Colorize(errMessage, formatter.YellowColor), + ) + defaultBundle = false + } + + useIMDSv2, err := strconv.ParseBool(bundle["imdsv2"]) + if err != nil { + errMessage := err.Error() + " Setting default as false\n" + logrus.Errorln( + formatter.Colorize(errMessage, formatter.YellowColor), + ) + 
useIMDSv2 = false + } + + imageBundle := ybaclient.ImageBundle{ + Name: util.GetStringPointer(bundle["name"]), + UseAsDefault: util.GetBoolPointer(defaultBundle), + Details: &ybaclient.ImageBundleDetails{ + Arch: util.GetStringPointer(bundle["arch"]), + SshUser: util.GetStringPointer(bundle["ssh-user"]), + SshPort: util.GetInt32Pointer(int32(sshPort)), + UseIMDSv2: util.GetBoolPointer(useIMDSv2), + Regions: ®ionOverrides, + }, + } + res = append(res, imageBundle) + } + return res +} + +func buildAWSImageBundleRegionOverrides( + regionOverrides []string, + name string) map[string]ybaclient.BundleInfo { + res := map[string]ybaclient.BundleInfo{} + for _, r := range regionOverrides { + override := providerutil.BuildImageBundleRegionOverrideMapFromString(r, "add") + if strings.Compare(override["name"], name) == 0 { + res[override["region-name"]] = ybaclient.BundleInfo{ + YbImage: util.GetStringPointer(override["machine-image"]), + } + } + } + if len(res) == 0 { + logrus.Fatalln( + formatter.Colorize("Machine Image not specified in image bundle.\n", + formatter.RedColor)) + } + return res +} diff --git a/managed/yba-cli/cmd/provider/aws/update_provider.go b/managed/yba-cli/cmd/provider/aws/update_provider.go index b8441b18b32e..8c26bc83b0ad 100644 --- a/managed/yba-cli/cmd/provider/aws/update_provider.go +++ b/managed/yba-cli/cmd/provider/aws/update_provider.go @@ -6,6 +6,7 @@ package aws import ( "fmt" + "strconv" "strings" @@ -97,6 +98,8 @@ var updateAWSProviderCmd = &cobra.Command{ cloudInfo := details.GetCloudInfo() awsCloudInfo := cloudInfo.GetAws() + providerImageBundles := provider.GetImageBundles() + newProviderName, err := cmd.Flags().GetString("new-name") if err != nil { logrus.Fatalf(formatter.Colorize(err.Error()+"\n", formatter.RedColor)) @@ -235,6 +238,53 @@ var updateAWSProviderCmd = &cobra.Command{ provider.SetRegions(providerRegions) // End of Updating Regions + // Update Image Bundles + + addImageBundles, err := 
cmd.Flags().GetStringArray("add-image-bundle") + if err != nil { + logrus.Fatalf(formatter.Colorize(err.Error()+"\n", formatter.RedColor)) + } + + editImageBundles, err := cmd.Flags().GetStringArray("edit-image-bundle") + if err != nil { + logrus.Fatalf(formatter.Colorize(err.Error()+"\n", formatter.RedColor)) + } + + removeImageBundles, err := cmd.Flags().GetStringArray("remove-image-bundle") + if err != nil { + logrus.Fatalf(formatter.Colorize(err.Error()+"\n", formatter.RedColor)) + } + + editImageBundleRegionOverride, err := cmd.Flags().GetStringArray( + "edit-image-bundle-region-override") + if err != nil { + logrus.Fatalf(formatter.Colorize(err.Error()+"\n", formatter.RedColor)) + } + + addImageBundleRegionOverride, err := cmd.Flags().GetStringArray( + "add-image-bundle-region-override") + if err != nil { + logrus.Fatalf(formatter.Colorize(err.Error()+"\n", formatter.RedColor)) + } + + providerImageBundles = removeAWSImageBundles(removeImageBundles, providerImageBundles) + + providerImageBundles = editAWSImageBundles( + editImageBundles, + editImageBundleRegionOverride, + providerImageBundles) + + providerImageBundles = addAWSImageBundles( + addImageBundles, + addImageBundleRegionOverride, + providerImageBundles, + len(providerRegions), + ) + + provider.SetImageBundles(providerImageBundles) + + // End of Updating Image Bundles + rUpdate, response, err := authAPI.EditProvider(provider.GetUuid()). EditProviderRequest(provider).Execute() if err != nil { @@ -280,10 +330,10 @@ func init() { "[Optional] Add region associated with the AWS provider. "+ "Provide the following comma separated fields as key-value pairs:"+ "\"region-name=,"+ - "vpc-id=,sg-id=,arch=\". "+ + "vpc-id=,sg-id=\". "+ formatter.Colorize("Region name is required key-value.", formatter.GreenColor)+ - " VPC ID, Security Group ID and Architecture are optional. "+ + " VPC ID and Security Group ID are optional. 
"+ "Each region needs to be added using a separate --add-region flag.") updateAWSProviderCmd.Flags().StringArray("add-zone", []string{}, "[Optional] Zone associated to the AWS Region defined. "+ @@ -329,11 +379,79 @@ func init() { "Subnet IDs and Secondary subnet ID is optional. "+ "Each zone needs to be modified using a separate --edit-zone flag.") + + // updateAWSProviderCmd.Flags().Bool("add-x86-default-image-bundle", false, + // "[Optional] Include Linux versions that are chosen and managed by"+ + // " YugabyteDB Anywhere in the catalog. (default false)") + // updateAWSProviderCmd.Flags().Bool("add-aarch64-default-image-bundle", false, + // "[Optional] Include Linux versions that are chosen and managed by"+ + // " YugabyteDB Anywhere in the catalog. (default false)") + + updateAWSProviderCmd.Flags().StringArray("add-image-bundle", []string{}, + "[Optional] Add Image bundles associated with the provider. "+ + "Provide the following comma separated fields as key-value pairs: "+ + "\"image-bundle-name=,arch=,"+ + "ssh-user=,ssh-port=,imdsv2=,default=\". "+ + formatter.Colorize( + "Image bundle name, architecture and SSH user are required key-value pairs.", + formatter.GreenColor)+ + " The default for SSH Port is 22, IMDSv2 ("+ + "This should be true if the Image bundle requires Instance Metadata Service v2)"+ + " is false. Default marks the image bundle as default for the provider. "+ + "Allowed values for architecture are x86_64 and arm64. "+ + "Each image bundle can be added using separate --add-image-bundle flag. "+ + "Example: --add-image-bundle =,"+ + "=,=22") + updateAWSProviderCmd.Flags().StringArray("add-image-bundle-region-override", []string{}, + "[Optional] Add Image bundle region overrides associated with the provider. "+ + "Provide the following comma separated fields as key-value pairs: "+ + "\"image-bundle-name=,region-name=,"+ + "machine-image=\". 
"+ + formatter.Colorize( + "All are required key-value pairs.", + formatter.GreenColor)+" Each --image-bundle definition "+ + "must have at least one corresponding --image-bundle-region-override "+ + "definition for every region added."+ + " Each override can be added using separate --image-bundle-region-override flag. "+ + "Example: --add-image-bundle-region-override =,"+ + "=,=") + + updateAWSProviderCmd.Flags().StringArray("edit-image-bundle", []string{}, + "[Optional] Edit Image bundles associated with the provider. "+ + "Provide the following comma separated fields as key-value pairs: "+ + "\"image-bundle-uuid=,"+ + "ssh-user=,ssh-port=,imdsv2=,default=\". "+ + formatter.Colorize( + "Image bundle UUID is a required key-value pair.", + formatter.GreenColor)+ + " Each image bundle can be edited using separate --edit-image-bundle flag. "+ + "Example: --edit-image-bundle =,"+ + "=,=22") + updateAWSProviderCmd.Flags().StringArray("edit-image-bundle-region-override", []string{}, + "[Optional] Edit overrides of the region associated with the provider. "+ + "Provide the following comma separated fields as key-value pairs: "+ + "\"image-bundle-uuid=,region-name=,"+ + "machine-image=\". "+ + formatter.Colorize( + "All are required key-value pairs.", + formatter.GreenColor)+ + " Each override can be edited using separate --edit-image-bundle-region-override flag. "+ + "Example: --edit-image-bundle-region-override =,"+ + "=,=") + + updateAWSProviderCmd.Flags().StringArray("remove-image-bundle", []string{}, + "[Optional] Image bundle UUID to be removed from the provider. "+ + "Each bundle to be removed needs to be provided using a separate "+ + "--remove-image-bundle definition. 
"+ + "Removing a image bundle removes the corresponding region overrides.") + updateAWSProviderCmd.Flags().String("ssh-user", "", "[Optional] Updating SSH User to access the YugabyteDB nodes.") updateAWSProviderCmd.Flags().Int("ssh-port", 0, "[Optional] Updating SSH Port to access the YugabyteDB nodes.") + updateAWSProviderCmd.Flags().MarkDeprecated("ssh-port", "Use --edit-image-bundle instead.") + updateAWSProviderCmd.Flags().MarkDeprecated("ssh-user", "Use --edit-image-bundle instead.") + updateAWSProviderCmd.Flags().Bool("airgap-install", false, "[Optional] Are YugabyteDB nodes installed in an air-gapped environment,"+ " lacking access to the public internet for package downloads. "+ @@ -420,8 +538,6 @@ func addAWSRegions( Details: &ybaclient.RegionDetails{ CloudInfo: &ybaclient.RegionCloudInfo{ Aws: &ybaclient.AWSRegionCloudInfo{ - YbImage: util.GetStringPointer(region["yb-image"]), - Arch: util.GetStringPointer(region["arch"]), SecurityGroupId: util.GetStringPointer(region["sg-id"]), Vnet: util.GetStringPointer(region["vpc-id"]), }, @@ -547,3 +663,186 @@ func addAWSZones( return zones } + +func removeAWSImageBundles( + removeImageBundles []string, + providerImageBundles []ybaclient.ImageBundle) []ybaclient.ImageBundle { + if len(removeImageBundles) == 0 { + return providerImageBundles + } + + for _, ib := range removeImageBundles { + for i, pIb := range providerImageBundles { + if strings.Compare(pIb.GetUuid(), ib) == 0 { + providerImageBundles = util.RemoveComponentFromSlice( + providerImageBundles, i, + ).([]ybaclient.ImageBundle) + } + } + } + + return providerImageBundles +} + +func editAWSImageBundles( + editImageBundles, editImageBundlesRegionOverrides []string, + providerImageBundles []ybaclient.ImageBundle, +) []ybaclient.ImageBundle { + + for i, ib := range providerImageBundles { + bundleUUID := ib.GetUuid() + details := ib.GetDetails() + imageBundleRegionOverrides := details.GetRegions() + imageBundleRegionOverrides = 
editAWSImageBundleRegionOverrides( + bundleUUID, + editImageBundlesRegionOverrides, + imageBundleRegionOverrides) + details.SetRegions(imageBundleRegionOverrides) + if len(editImageBundles) != 0 { + for _, imageBundleString := range editImageBundles { + imageBundle := providerutil.BuildImageBundleMapFromString( + imageBundleString, + "edit", + ) + + if strings.Compare(imageBundle["uuid"], bundleUUID) == 0 { + + if len(imageBundle["ssh-user"]) != 0 { + details.SetSshUser(imageBundle["ssh-user"]) + } + if len(imageBundle["ssh-port"]) != 0 { + sshPort, err := strconv.ParseInt(imageBundle["ssh-port"], 10, 64) + if err != nil { + errMessage := err.Error() + " Using SSH Port as 22\n" + logrus.Errorln( + formatter.Colorize(errMessage, formatter.YellowColor), + ) + sshPort = 22 + } + details.SetSshPort(int32(sshPort)) + } + if len(imageBundle["imdsv2"]) != 0 { + useIMDSv2, err := strconv.ParseBool(imageBundle["imdsv2"]) + if err != nil { + errMessage := err.Error() + " Setting default as false\n" + logrus.Errorln( + formatter.Colorize(errMessage, formatter.YellowColor), + ) + useIMDSv2 = false + } + details.SetUseIMDSv2(useIMDSv2) + } + ib.SetDetails(details) + + if len(imageBundle["default"]) != 0 { + defaultBundle, err := strconv.ParseBool(imageBundle["default"]) + if err != nil { + errMessage := err.Error() + " Setting default as false\n" + logrus.Errorln( + formatter.Colorize(errMessage, formatter.YellowColor), + ) + defaultBundle = false + } + ib.SetUseAsDefault(defaultBundle) + } + + } + + } + } + providerImageBundles[i] = ib + } + return providerImageBundles +} + +func editAWSImageBundleRegionOverrides( + bundleUUID string, + editImageBundleRegionOverrides []string, + imageBundleRegionOverrides map[string]ybaclient.BundleInfo, +) map[string]ybaclient.BundleInfo { + + if len(editImageBundleRegionOverrides) == 0 { + return imageBundleRegionOverrides + } + + for _, imageBundleString := range editImageBundleRegionOverrides { + override := 
providerutil.BuildImageBundleRegionOverrideMapFromString(imageBundleString, "edit") + if strings.Compare(override["uuid"], bundleUUID) == 0 { + for k, v := range override { + if _, ok := imageBundleRegionOverrides[k]; ok { + imageBundleRegionOverrides[k] = ybaclient.BundleInfo{ + YbImage: util.GetStringPointer(v), + } + } + } + } + + } + + return imageBundleRegionOverrides +} + +func addAWSImageBundles( + addImageBundles, + addImageBundleRegionOverrides []string, + providerImageBundles []ybaclient.ImageBundle, + numberOfRegions int) []ybaclient.ImageBundle { + if len(addImageBundles) == 0 { + return providerImageBundles + } + for _, i := range addImageBundles { + bundle := providerutil.BuildImageBundleMapFromString(i, "add") + + regionOverrides := buildAWSImageBundleRegionOverrides( + addImageBundleRegionOverrides, + bundle["name"]) + + if len(regionOverrides) < numberOfRegions { + logrus.Fatalf(formatter.Colorize( + "Machine Image must be provided for every region added.\n", + formatter.RedColor, + )) + } + + sshPort, err := strconv.ParseInt(bundle["ssh-port"], 10, 64) + if err != nil { + errMessage := err.Error() + " Using SSH Port as 22\n" + logrus.Errorln( + formatter.Colorize(errMessage, formatter.YellowColor), + ) + sshPort = 22 + } + + defaultBundle, err := strconv.ParseBool(bundle["default"]) + if err != nil { + errMessage := err.Error() + " Setting default as false\n" + logrus.Errorln( + formatter.Colorize(errMessage, formatter.YellowColor), + ) + defaultBundle = false + } + + useIMDSv2, err := strconv.ParseBool(bundle["imdsv2"]) + if err != nil { + errMessage := err.Error() + " Setting default as false\n" + logrus.Errorln( + formatter.Colorize(errMessage, formatter.YellowColor), + ) + useIMDSv2 = false + } + + imageBundle := ybaclient.ImageBundle{ + Name: util.GetStringPointer(bundle["name"]), + UseAsDefault: util.GetBoolPointer(defaultBundle), + Details: &ybaclient.ImageBundleDetails{ + Arch: util.GetStringPointer(bundle["arch"]), + SshUser: 
util.GetStringPointer(bundle["ssh-user"]), + SshPort: util.GetInt32Pointer(int32(sshPort)), + UseIMDSv2: util.GetBoolPointer(useIMDSv2), + Regions: ®ionOverrides, + }, + } + providerImageBundles = append(providerImageBundles, imageBundle) + } + return providerImageBundles +} diff --git a/managed/yba-cli/cmd/provider/azu/create_provider.go b/managed/yba-cli/cmd/provider/azu/create_provider.go index 950df92ecd95..5e480e921383 100644 --- a/managed/yba-cli/cmd/provider/azu/create_provider.go +++ b/managed/yba-cli/cmd/provider/azu/create_provider.go @@ -7,6 +7,7 @@ package azu import ( "fmt" "os" + "strconv" "strings" "github.com/sirupsen/logrus" @@ -178,9 +179,15 @@ var createAzureProviderCmd = &cobra.Command{ logrus.Fatalf(formatter.Colorize(err.Error()+"\n", formatter.RedColor)) } + imageBundles, err := cmd.Flags().GetStringArray("image-bundle") + if err != nil { + logrus.Fatalf(formatter.Colorize(err.Error()+"\n", formatter.RedColor)) + } + requestBody := ybaclient.Provider{ Code: util.GetStringPointer(providerCode), AllAccessKeys: &allAccessKeys, + ImageBundles: buildAzureImageBundles(imageBundles), Name: util.GetStringPointer(providerName), Regions: buildAzureRegions(regions, zones), Details: &ybaclient.ProviderDetails{ @@ -240,11 +247,12 @@ func init() { createAzureProviderCmd.Flags().StringArray("region", []string{}, "[Required] Region associated with the Azure provider. Minimum number of required "+ "regions = 1. Provide the following comma separated fields as key-value pairs:"+ - "\"region-name=,"+ - "vnet=,sg-id=,yb-image=\". "+ + "\"region-name=,vnet=,sg-id=,"+ + "rg=,network-rg=\". "+ formatter.Colorize("Region name and Virtual network are required key-values.", formatter.GreenColor)+ - " Security Group ID and YB Image (AMI) are optional. "+ + " Security Group ID, Resource Group (override for this region) and Network "+ + "Resource Group (override for this region) are optional. "+ "Each region needs to be added using a separate --region flag. 
"+ "Example: --region region-name=westus2,vnet=") createAzureProviderCmd.Flags().StringArray("zone", []string{}, @@ -259,10 +267,26 @@ func init() { "Each zone needs to be added using a separate --zone flag. "+ "Example: --zone zone-name=westus2-1,region-name=westus2,subnet=") + createAzureProviderCmd.Flags().StringArray("image-bundle", []string{}, + "[Optional] Intel x86_64 image bundles associated with Azure provider. "+ + "Provide the following comma separated fields as key-value pairs: "+ + "\"image-bundle-name=,machine-image=,"+ + "ssh-user=,ssh-port=,default=\". "+ + formatter.Colorize( + "Image bundle name, machine image and SSH user are required key-value pairs.", + formatter.GreenColor)+ + " The default SSH Port is 22. Default marks the image bundle as default for the provider. "+ + "Each image bundle can be added using separate --image-bundle flag. "+ + "Example: --image-bundle =,machine-image=,"+ + "=,=22") + createAzureProviderCmd.Flags().String("ssh-user", "centos", "[Optional] SSH User to access the YugabyteDB nodes.") createAzureProviderCmd.Flags().Int("ssh-port", 22, "[Optional] SSH Port to access the YugabyteDB nodes.") + createAzureProviderCmd.Flags().MarkDeprecated("ssh-port", "Use --edit-image-bundle instead.") + createAzureProviderCmd.Flags().MarkDeprecated("ssh-user", "Use --edit-image-bundle instead.") + createAzureProviderCmd.Flags().String("custom-ssh-keypair-name", "", "[Optional] Provide custom key pair name to access YugabyteDB nodes. 
"+ "If left empty, "+ @@ -305,9 +329,10 @@ func buildAzureRegions(regionStrings, zoneStrings []string) (res []ybaclient.Reg Details: &ybaclient.RegionDetails{ CloudInfo: &ybaclient.RegionCloudInfo{ Azu: &ybaclient.AzureRegionCloudInfo{ - SecurityGroupId: util.GetStringPointer(region["sg-id"]), - Vnet: util.GetStringPointer(region["vnet"]), - YbImage: util.GetStringPointer(region["yb-image"]), + SecurityGroupId: util.GetStringPointer(region["sg-id"]), + Vnet: util.GetStringPointer(region["vnet"]), + AzuNetworkRGOverride: util.GetStringPointer(region["network-rg"]), + AzuRGOverride: util.GetStringPointer(region["rg"]), }, }, }, @@ -345,3 +370,59 @@ func buildAzureZones(zoneStrings []string, regionName string) (res []ybaclient.A } return res } + +func buildAzureImageBundles(imageBundles []string) []ybaclient.ImageBundle { + imageBundleLen := len(imageBundles) + res := make([]ybaclient.ImageBundle, 0) + for _, i := range imageBundles { + bundle := providerutil.BuildImageBundleMapFromString(i, "add") + bundle = providerutil.DefaultImageBundleValues(bundle) + + if _, ok := bundle["ssh-user"]; !ok { + logrus.Fatalln( + formatter.Colorize( + "SSH User not specified in image bundle.\n", + formatter.RedColor)) + } + + if _, ok := bundle["machine-image"]; !ok { + logrus.Fatalln( + formatter.Colorize("Machine Image not specified in image bundle.\n", + formatter.RedColor)) + } + + sshPort, err := strconv.ParseInt(bundle["ssh-port"], 10, 64) + if err != nil { + errMessage := err.Error() + " Using SSH Port as 22\n" + logrus.Errorln( + formatter.Colorize(errMessage, formatter.YellowColor), + ) + sshPort = 22 + } + + defaultBundle, err := strconv.ParseBool(bundle["default"]) + if err != nil { + errMessage := err.Error() + " Setting default as false\n" + logrus.Errorln( + formatter.Colorize(errMessage, formatter.YellowColor), + ) + defaultBundle = false + } + if imageBundleLen == 1 && !defaultBundle { + defaultBundle = true + } + + imageBundle := ybaclient.ImageBundle{ + Name: 
util.GetStringPointer(bundle["name"]), + UseAsDefault: util.GetBoolPointer(defaultBundle), + Details: &ybaclient.ImageBundleDetails{ + Arch: util.GetStringPointer(bundle["arch"]), + GlobalYbImage: util.GetStringPointer(bundle["machine-image"]), + SshUser: util.GetStringPointer(bundle["ssh-user"]), + SshPort: util.GetInt32Pointer(int32(sshPort)), + }, + } + res = append(res, imageBundle) + } + return res +} diff --git a/managed/yba-cli/cmd/provider/azu/update_provider.go b/managed/yba-cli/cmd/provider/azu/update_provider.go index 02835a88a467..6bc41988f0de 100644 --- a/managed/yba-cli/cmd/provider/azu/update_provider.go +++ b/managed/yba-cli/cmd/provider/azu/update_provider.go @@ -6,6 +6,7 @@ package azu import ( "fmt" + "strconv" "strings" "github.com/sirupsen/logrus" @@ -78,6 +79,8 @@ var updateAzureProviderCmd = &cobra.Command{ cloudInfo := details.GetCloudInfo() azureCloudInfo := cloudInfo.GetAzu() + providerImageBundles := provider.GetImageBundles() + newProviderName, err := cmd.Flags().GetString("new-name") if err != nil { logrus.Fatalf(formatter.Colorize(err.Error()+"\n", formatter.RedColor)) @@ -255,6 +258,38 @@ var updateAzureProviderCmd = &cobra.Command{ provider.SetRegions(providerRegions) // End of Updating Regions + // Update Image Bundles + + addImageBundles, err := cmd.Flags().GetStringArray("add-image-bundle") + if err != nil { + logrus.Fatalf(formatter.Colorize(err.Error()+"\n", formatter.RedColor)) + } + + editImageBundles, err := cmd.Flags().GetStringArray("edit-image-bundle") + if err != nil { + logrus.Fatalf(formatter.Colorize(err.Error()+"\n", formatter.RedColor)) + } + + removeImageBundles, err := cmd.Flags().GetStringArray("remove-image-bundle") + if err != nil { + logrus.Fatalf(formatter.Colorize(err.Error()+"\n", formatter.RedColor)) + } + + providerImageBundles = removeAzureImageBundles(removeImageBundles, providerImageBundles) + + providerImageBundles = editAzureImageBundles( + editImageBundles, + providerImageBundles) + + 
providerImageBundles = addAzureImageBundles( + addImageBundles, + providerImageBundles, + ) + + provider.SetImageBundles(providerImageBundles) + + // End of Updating Image Bundles + rUpdate, response, err := authAPI.EditProvider(provider.GetUuid()). EditProviderRequest(provider).Execute() if err != nil { @@ -314,10 +349,10 @@ func init() { "[Optional] Add region associated with the Azure provider. "+ "Provide the following comma separated fields as key-value pairs:"+ "\"region-name=,"+ - "vnet=,sg-id=,yb-image=\". "+ + "vnet=,sg-id=\". "+ formatter.Colorize("Region name and Virtual network are required key-values.", formatter.GreenColor)+ - " Security Group ID and YB Image (AMI) are optional. "+ + " Security Group ID is optional. "+ "Each region needs to be added using a separate --add-region flag.") updateAzureProviderCmd.Flags().StringArray("add-zone", []string{}, "[Optional] Zone associated to the Azure Region defined. "+ @@ -347,7 +382,7 @@ func init() { "[Optional] Edit region details associated with the Azure provider. "+ "Provide the following comma separated fields as key-value pairs:"+ "\"region-name=,"+ - "vnet=,sg-id=,yb-image=\". "+ + "vnet=,sg-id=\". "+ formatter.Colorize("Region name is a required key-value pair.", formatter.GreenColor)+ " Virtual network and Security Group ID are optional. "+ @@ -362,10 +397,42 @@ func init() { "Subnet IDs and Secondary subnet ID is optional. "+ "Each zone needs to be modified using a separate --edit-zone flag.") + updateAzureProviderCmd.Flags().StringArray("add-image-bundle", []string{}, + "[Optional] Add Intel x86_64 image bundles associated with the provider. "+ + "Provide the following comma separated fields as key-value pairs: "+ + "\"image-bundle-name=,machine-image=,"+ + "ssh-user=,ssh-port=,default=\". "+ + formatter.Colorize( + "Image bundle name, machine image and SSH user are required key-value pairs.", + formatter.GreenColor)+ + " The default SSH Port is 22. 
Default marks the image bundle as default for the provider. "+ + "Each image bundle can be added using separate --image-bundle flag. "+ + "Example: --add-image-bundle =,machine-image=,"+ + "=,=22") + + updateAzureProviderCmd.Flags().StringArray("edit-image-bundle", []string{}, + "[Optional] Edit Intel x86_64 image bundles associated with the provider. "+ + "Provide the following comma separated fields as key-value pairs: "+ + "\"image-bundle-uuid=,machine-image=,"+ + "ssh-user=,ssh-port=,default=\". "+ + formatter.Colorize( + "Image bundle UUID is a required key-value pair.", + formatter.GreenColor)+ + "Each image bundle can be added using separate --image-bundle flag. "+ + "Example: --edit-image-bundle =,machine-image=,"+ + "=,=22") + + updateAzureProviderCmd.Flags().StringArray("remove-image-bundle", []string{}, + "[Optional] Image bundle UUID to be removed from the provider. "+ + "Each bundle to be removed needs to be provided using a separate "+ + "--remove-image-bundle definition.") + updateAzureProviderCmd.Flags().String("ssh-user", "", "[Optional] Updating SSH User to access the YugabyteDB nodes.") updateAzureProviderCmd.Flags().Int("ssh-port", 0, "[Optional] Updating SSH Port to access the YugabyteDB nodes.") + updateAzureProviderCmd.Flags().MarkDeprecated("ssh-port", "Use --edit-image-bundle instead.") + updateAzureProviderCmd.Flags().MarkDeprecated("ssh-user", "Use --edit-image-bundle instead.") updateAzureProviderCmd.Flags().Bool("airgap-install", false, "[Optional] Are YugabyteDB nodes installed in an air-gapped environment,"+ @@ -453,7 +520,6 @@ func addAzureRegions( Details: &ybaclient.RegionDetails{ CloudInfo: &ybaclient.RegionCloudInfo{ Azu: &ybaclient.AzureRegionCloudInfo{ - YbImage: util.GetStringPointer(region["yb-image"]), SecurityGroupId: util.GetStringPointer(region["sg-id"]), Vnet: util.GetStringPointer(region["vnet"]), }, @@ -579,3 +645,138 @@ func addAzureZones( return zones } + +func removeAzureImageBundles( + removeImageBundles []string, 
+ providerImageBundles []ybaclient.ImageBundle) []ybaclient.ImageBundle { + if len(removeImageBundles) == 0 { + return providerImageBundles + } + + for _, ib := range removeImageBundles { + for i, pIb := range providerImageBundles { + if strings.Compare(pIb.GetUuid(), ib) == 0 { + providerImageBundles = util.RemoveComponentFromSlice( + providerImageBundles, i, + ).([]ybaclient.ImageBundle) + } + } + } + + return providerImageBundles +} + +func editAzureImageBundles( + editImageBundles []string, + providerImageBundles []ybaclient.ImageBundle, +) []ybaclient.ImageBundle { + + for i, ib := range providerImageBundles { + bundleUUID := ib.GetUuid() + details := ib.GetDetails() + if len(editImageBundles) != 0 { + for _, imageBundleString := range editImageBundles { + imageBundle := providerutil.BuildImageBundleMapFromString(imageBundleString, "edit") + + if strings.Compare(imageBundle["uuid"], bundleUUID) == 0 { + if len(imageBundle["machine-image"]) != 0 { + details.SetGlobalYbImage(imageBundle["machine-image"]) + } + if len(imageBundle["ssh-user"]) != 0 { + details.SetSshUser(imageBundle["ssh-user"]) + } + if len(imageBundle["ssh-port"]) != 0 { + sshPort, err := strconv.ParseInt(imageBundle["ssh-port"], 10, 64) + if err != nil { + errMessage := err.Error() + " Using SSH Port as 22\n" + logrus.Errorln( + formatter.Colorize(errMessage, formatter.YellowColor), + ) + sshPort = 22 + } + details.SetSshPort(int32(sshPort)) + } + + ib.SetDetails(details) + + if len(imageBundle["default"]) != 0 { + defaultBundle, err := strconv.ParseBool(imageBundle["default"]) + if err != nil { + errMessage := err.Error() + " Setting default as false\n" + logrus.Errorln( + formatter.Colorize(errMessage, formatter.YellowColor), + ) + defaultBundle = false + } + ib.SetUseAsDefault(defaultBundle) + } + + } + + } + } + providerImageBundles[i] = ib + } + return providerImageBundles +} + +func addAzureImageBundles( + imageBundles []string, + providerImageBundles []ybaclient.ImageBundle, +) 
[]ybaclient.ImageBundle { + if len(imageBundles) == 0 { + return providerImageBundles + } + imageBundleLen := len(imageBundles) + for _, i := range imageBundles { + bundle := providerutil.BuildImageBundleMapFromString(i, "add") + bundle = providerutil.DefaultImageBundleValues(bundle) + + if _, ok := bundle["ssh-user"]; !ok { + logrus.Fatalln( + formatter.Colorize( + "SSH User not specified in image bundle.\n", + formatter.RedColor)) + } + + if _, ok := bundle["machine-image"]; !ok { + logrus.Fatalln( + formatter.Colorize("Machine Image not specified in image bundle.\n", + formatter.RedColor)) + } + + sshPort, err := strconv.ParseInt(bundle["ssh-port"], 10, 64) + if err != nil { + errMessage := err.Error() + " Using SSH Port as 22\n" + logrus.Errorln( + formatter.Colorize(errMessage, formatter.YellowColor), + ) + sshPort = 22 + } + + defaultBundle, err := strconv.ParseBool(bundle["default"]) + if err != nil { + errMessage := err.Error() + " Setting default as false\n" + logrus.Errorln( + formatter.Colorize(errMessage, formatter.YellowColor), + ) + defaultBundle = false + } + if imageBundleLen == 1 && !defaultBundle { + defaultBundle = true + } + + imageBundle := ybaclient.ImageBundle{ + Name: util.GetStringPointer(bundle["name"]), + UseAsDefault: util.GetBoolPointer(defaultBundle), + Details: &ybaclient.ImageBundleDetails{ + Arch: util.GetStringPointer(bundle["arch"]), + GlobalYbImage: util.GetStringPointer(bundle["machine-image"]), + SshUser: util.GetStringPointer(bundle["ssh-user"]), + SshPort: util.GetInt32Pointer(int32(sshPort)), + }, + } + providerImageBundles = append(providerImageBundles, imageBundle) + } + return providerImageBundles +} diff --git a/managed/yba-cli/cmd/provider/gcp/create_provider.go b/managed/yba-cli/cmd/provider/gcp/create_provider.go index d6c39a002bc5..2440bdfece6d 100644 --- a/managed/yba-cli/cmd/provider/gcp/create_provider.go +++ b/managed/yba-cli/cmd/provider/gcp/create_provider.go @@ -7,6 +7,7 @@ package gcp import ( "fmt" "os" + 
"strconv" "github.com/sirupsen/logrus" "github.com/spf13/cobra" @@ -195,10 +196,15 @@ var createGCPProviderCmd = &cobra.Command{ logrus.Fatalf(formatter.Colorize(err.Error()+"\n", formatter.RedColor)) } - requestBody := ybaclient.Provider{ - Code: util.GetStringPointer(providerCode), - Name: util.GetStringPointer(providerName), + imageBundles, err := cmd.Flags().GetStringArray("image-bundle") + if err != nil { + logrus.Fatalf(formatter.Colorize(err.Error()+"\n", formatter.RedColor)) + } + requestBody := ybaclient.Provider{ + Code: util.GetStringPointer(providerCode), + Name: util.GetStringPointer(providerName), + ImageBundles: buildGCPImageBundles(imageBundles), AllAccessKeys: &allAccessKeys, Details: &ybaclient.ProviderDetails{ AirGapInstall: util.GetBoolPointer(airgapInstall), @@ -259,8 +265,8 @@ func init() { createGCPProviderCmd.Flags().StringArray("region", []string{}, "[Required] Region associated with the GCP provider. Minimum number of required "+ - "regions = 1. Provide the following comma separated fields as key-value pairs:"+ - "\"region-name=,shared-subnet=,yb-image=,"+ + "regions = 1. Provide the following comma separated fields as key-value pairs: "+ + "\"region-name=,shared-subnet=,"+ "instance-template=\". "+ formatter.Colorize("Region name and Shared subnet are required key-value pairs.", formatter.GreenColor)+ @@ -268,10 +274,26 @@ func init() { "Each region can be added using separate --region flags. "+ "Example: --region region-name=us-west1,shared-subnet=") + createGCPProviderCmd.Flags().StringArray("image-bundle", []string{}, + "[Optional] Intel x86_64 image bundles associated with GCP provider. "+ + "Provide the following comma separated fields as key-value pairs: "+ + "\"image-bundle-name=,machine-image=,"+ + "ssh-user=,ssh-port=,default=\". "+ + formatter.Colorize( + "Image bundle name, machine image and SSH user are required key-value pairs.", + formatter.GreenColor)+ + " The default SSH Port is 22. 
Default marks the image bundle as default for the provider. "+ + "Each image bundle can be added using separate --image-bundle flag. "+ + "Example: --image-bundle =,machine-image=,"+ + "=,=22") + createGCPProviderCmd.Flags().String("ssh-user", "centos", "[Optional] SSH User to access the YugabyteDB nodes.") createGCPProviderCmd.Flags().Int("ssh-port", 22, "[Optional] SSH Port to access the YugabyteDB nodes.") + createGCPProviderCmd.Flags().MarkDeprecated("ssh-port", "Use --edit-image-bundle instead.") + createGCPProviderCmd.Flags().MarkDeprecated("ssh-user", "Use --edit-image-bundle instead.") + createGCPProviderCmd.Flags().String("custom-ssh-keypair-name", "", "[Optional] Provide custom key pair name to access YugabyteDB nodes. "+ "If left empty, "+ @@ -315,7 +337,6 @@ func buildGCPRegions(regionStrings []string, allowed bool, version string) ( Details: &ybaclient.RegionDetails{ CloudInfo: &ybaclient.RegionCloudInfo{ Gcp: &ybaclient.GCPRegionCloudInfo{ - YbImage: util.GetStringPointer(region["yb-image"]), InstanceTemplate: util.GetStringPointer(region["instance-template"]), }, }, @@ -343,3 +364,59 @@ func buildGCPZones(sharedSubnet string) (res []ybaclient.AvailabilityZone) { } return res } + +func buildGCPImageBundles(imageBundles []string) []ybaclient.ImageBundle { + imageBundleLen := len(imageBundles) + res := make([]ybaclient.ImageBundle, 0) + for _, i := range imageBundles { + bundle := providerutil.BuildImageBundleMapFromString(i, "add") + bundle = providerutil.DefaultImageBundleValues(bundle) + + if _, ok := bundle["ssh-user"]; !ok { + logrus.Fatalln( + formatter.Colorize( + "SSH User not specified in image bundle.\n", + formatter.RedColor)) + } + + if _, ok := bundle["machine-image"]; !ok { + logrus.Fatalln( + formatter.Colorize("Machine Image not specified in image bundle.\n", + formatter.RedColor)) + } + + sshPort, err := strconv.ParseInt(bundle["ssh-port"], 10, 64) + if err != nil { + errMessage := err.Error() + " Using SSH Port as 22\n" + 
logrus.Errorln( + formatter.Colorize(errMessage, formatter.YellowColor), + ) + sshPort = 22 + } + + defaultBundle, err := strconv.ParseBool(bundle["default"]) + if err != nil { + errMessage := err.Error() + " Setting default as false\n" + logrus.Errorln( + formatter.Colorize(errMessage, formatter.YellowColor), + ) + defaultBundle = false + } + if imageBundleLen == 1 && !defaultBundle { + defaultBundle = true + } + + imageBundle := ybaclient.ImageBundle{ + Name: util.GetStringPointer(bundle["name"]), + UseAsDefault: util.GetBoolPointer(defaultBundle), + Details: &ybaclient.ImageBundleDetails{ + Arch: util.GetStringPointer(bundle["arch"]), + GlobalYbImage: util.GetStringPointer(bundle["machine-image"]), + SshUser: util.GetStringPointer(bundle["ssh-user"]), + SshPort: util.GetInt32Pointer(int32(sshPort)), + }, + } + res = append(res, imageBundle) + } + return res +} diff --git a/managed/yba-cli/cmd/provider/gcp/update_provider.go b/managed/yba-cli/cmd/provider/gcp/update_provider.go index 6385a4a8fc75..486cd8c9cf6b 100644 --- a/managed/yba-cli/cmd/provider/gcp/update_provider.go +++ b/managed/yba-cli/cmd/provider/gcp/update_provider.go @@ -6,6 +6,7 @@ package gcp import ( "fmt" + "strconv" "strings" "github.com/sirupsen/logrus" @@ -97,6 +98,8 @@ var updateGCPProviderCmd = &cobra.Command{ cloudInfo := details.GetCloudInfo() gcpCloudInfo := cloudInfo.GetGcp() + providerImageBundles := provider.GetImageBundles() + newProviderName, err := cmd.Flags().GetString("new-name") if err != nil { logrus.Fatalf(formatter.Colorize(err.Error()+"\n", formatter.RedColor)) @@ -295,6 +298,38 @@ var updateGCPProviderCmd = &cobra.Command{ // End of Updating Regions + // Update Image Bundles + + addImageBundles, err := cmd.Flags().GetStringArray("add-image-bundle") + if err != nil { + logrus.Fatalf(formatter.Colorize(err.Error()+"\n", formatter.RedColor)) + } + + editImageBundles, err := cmd.Flags().GetStringArray("edit-image-bundle") + if err != nil { + 
logrus.Fatalf(formatter.Colorize(err.Error()+"\n", formatter.RedColor)) + } + + removeImageBundles, err := cmd.Flags().GetStringArray("remove-image-bundle") + if err != nil { + logrus.Fatalf(formatter.Colorize(err.Error()+"\n", formatter.RedColor)) + } + + providerImageBundles = removeGCPImageBundles(removeImageBundles, providerImageBundles) + + providerImageBundles = editGCPImageBundles( + editImageBundles, + providerImageBundles) + + providerImageBundles = addGCPImageBundles( + addImageBundles, + providerImageBundles, + ) + + provider.SetImageBundles(providerImageBundles) + + // End of Updating Image Bundles + rUpdate, response, err := authAPI.EditProvider(provider.GetUuid()). EditProviderRequest(provider).Execute() if err != nil { @@ -345,30 +380,62 @@ func init() { updateGCPProviderCmd.Flags().StringArray("add-region", []string{}, "[Required] Region associated with the GCP provider. Minimum number of required "+ "regions = 1. Provide the following comma separated fields as key-value pairs:"+ - "\"region-name=,shared-subnet=,yb-image=,"+ + "\"region-name=,shared-subnet=,"+ "instance-template=\". "+ formatter.Colorize("Region name and Shared subnet are required key-value pairs.", formatter.GreenColor)+ - " YB Image (AMI) and Instance Template are optional. "+ + " Instance Template is optional. "+ "Each region can be added using separate --add-region flags.") updateGCPProviderCmd.Flags().StringArray("edit-region", []string{}, "[Optional] Edit region details associated with the GCP provider. "+ "Provide the following comma separated fields as key-value pairs:"+ - "\"region-name=,shared-subnet=,yb-image=,"+ + "\"region-name=,shared-subnet=,"+ "instance-template=\". "+ formatter.Colorize("Region name is a required key-value pair.", formatter.GreenColor)+ - " Shared subnet, YB Image (AMI) and Instance Template are optional. "+ + " Shared subnet and Instance Template are optional. 
"+ "Each region needs to be modified using a separate --edit-region flag.") updateGCPProviderCmd.Flags().StringArray("remove-region", []string{}, "[Optional] Region name to be removed from the provider. "+ "Each region to be removed needs to be provided using a separate "+ "--remove-region definition. Removing a region removes the corresponding zones.") + updateGCPProviderCmd.Flags().StringArray("add-image-bundle", []string{}, + "[Optional] Add Intel x86_64 image bundles associated with the provider. "+ + "Provide the following comma separated fields as key-value pairs: "+ + "\"image-bundle-name=,machine-image=,"+ + "ssh-user=,ssh-port=,default=\". "+ + formatter.Colorize( + "Image bundle name, machine image and SSH user are required key-value pairs.", + formatter.GreenColor)+ + " The default SSH Port is 22. Default marks the image bundle as default for the provider. "+ + "Each image bundle can be added using separate --image-bundle flag. "+ + "Example: --add-image-bundle =,machine-image=,"+ + "=,=22") + + updateGCPProviderCmd.Flags().StringArray("edit-image-bundle", []string{}, + "[Optional] Edit Intel x86_64 image bundles associated with the provider. "+ + "Provide the following comma separated fields as key-value pairs: "+ + "\"image-bundle-uuid=,machine-image=,"+ + "ssh-user=,ssh-port=,default=\". "+ + formatter.Colorize( + "Image bundle UUID is a required key-value pair.", + formatter.GreenColor)+ + "Each image bundle can be added using separate --image-bundle flag. "+ + "Example: --edit-image-bundle =,machine-image=,"+ + "=,=22") + + updateGCPProviderCmd.Flags().StringArray("remove-image-bundle", []string{}, + "[Optional] Image bundle UUID to be removed from the provider. 
"+ + "Each bundle to be removed needs to be provided using a separate "+ + "--remove-image-bundle definition.") + updateGCPProviderCmd.Flags().String("ssh-user", "", "[Optional] Updating SSH User to access the YugabyteDB nodes.") updateGCPProviderCmd.Flags().Int("ssh-port", 0, "[Optional] Updating SSH Port to access the YugabyteDB nodes.") + updateGCPProviderCmd.Flags().MarkDeprecated("ssh-port", "Use --edit-image-bundle instead.") + updateGCPProviderCmd.Flags().MarkDeprecated("ssh-user", "Use --edit-image-bundle instead.") updateGCPProviderCmd.Flags().Bool("airgap-install", false, "[Optional] Are YugabyteDB nodes installed in an air-gapped environment,"+ @@ -413,9 +480,6 @@ func editGCPRegions( details := r.GetDetails() cloudInfo := details.GetCloudInfo() gcp := cloudInfo.GetGcp() - if len(region["yb-image"]) != 0 { - gcp.SetYbImage(region["yb-image"]) - } if len(region["shared-subnet"]) != 0 { zones := r.GetZones() for i, z := range zones { @@ -458,7 +522,6 @@ func addGCPRegions( Details: &ybaclient.RegionDetails{ CloudInfo: &ybaclient.RegionCloudInfo{ Gcp: &ybaclient.GCPRegionCloudInfo{ - YbImage: util.GetStringPointer(region["yb-image"]), InstanceTemplate: util.GetStringPointer(region["instance-template"]), }, }, @@ -485,3 +548,138 @@ func addGCPZones( } return zones } + +func removeGCPImageBundles( + removeImageBundles []string, + providerImageBundles []ybaclient.ImageBundle) []ybaclient.ImageBundle { + if len(removeImageBundles) == 0 { + return providerImageBundles + } + + for _, ib := range removeImageBundles { + for i, pIb := range providerImageBundles { + if strings.Compare(pIb.GetUuid(), ib) == 0 { + providerImageBundles = util.RemoveComponentFromSlice( + providerImageBundles, i, + ).([]ybaclient.ImageBundle) + } + } + } + + return providerImageBundles +} + +func editGCPImageBundles( + editImageBundles []string, + providerImageBundles []ybaclient.ImageBundle, +) []ybaclient.ImageBundle { + + for i, ib := range providerImageBundles { + bundleUUID := 
ib.GetUuid() + details := ib.GetDetails() + if len(editImageBundles) != 0 { + for _, imageBundleString := range editImageBundles { + imageBundle := providerutil.BuildImageBundleMapFromString(imageBundleString, "edit") + + if strings.Compare(imageBundle["uuid"], bundleUUID) == 0 { + if len(imageBundle["machine-image"]) != 0 { + details.SetGlobalYbImage(imageBundle["machine-image"]) + } + if len(imageBundle["ssh-user"]) != 0 { + details.SetSshUser(imageBundle["ssh-user"]) + } + if len(imageBundle["ssh-port"]) != 0 { + sshPort, err := strconv.ParseInt(imageBundle["ssh-port"], 10, 64) + if err != nil { + errMessage := err.Error() + " Using SSH Port as 22\n" + logrus.Errorln( + formatter.Colorize(errMessage, formatter.YellowColor), + ) + sshPort = 22 + } + details.SetSshPort(int32(sshPort)) + } + + ib.SetDetails(details) + + if len(imageBundle["default"]) != 0 { + defaultBundle, err := strconv.ParseBool(imageBundle["default"]) + if err != nil { + errMessage := err.Error() + " Setting default as false\n" + logrus.Errorln( + formatter.Colorize(errMessage, formatter.YellowColor), + ) + defaultBundle = false + } + ib.SetUseAsDefault(defaultBundle) + } + + } + + } + } + providerImageBundles[i] = ib + } + return providerImageBundles +} + +func addGCPImageBundles( + imageBundles []string, + providerImageBundles []ybaclient.ImageBundle, +) []ybaclient.ImageBundle { + if len(imageBundles) == 0 { + return providerImageBundles + } + imageBundleLen := len(imageBundles) + for _, i := range imageBundles { + bundle := providerutil.BuildImageBundleMapFromString(i, "add") + bundle = providerutil.DefaultImageBundleValues(bundle) + + if _, ok := bundle["ssh-user"]; !ok { + logrus.Fatalln( + formatter.Colorize( + "SSH User not specified in image bundle.\n", + formatter.RedColor)) + } + + if _, ok := bundle["machine-image"]; !ok { + logrus.Fatalln( + formatter.Colorize("Machine Image not specified in image bundle.\n", + formatter.RedColor)) + } + + sshPort, err := 
strconv.ParseInt(bundle["ssh-port"], 10, 64) + if err != nil { + errMessage := err.Error() + " Using SSH Port as 22\n" + logrus.Errorln( + formatter.Colorize(errMessage, formatter.YellowColor), + ) + sshPort = 22 + } + + defaultBundle, err := strconv.ParseBool(bundle["default"]) + if err != nil { + errMessage := err.Error() + " Setting default as false\n" + logrus.Errorln( + formatter.Colorize(errMessage, formatter.YellowColor), + ) + defaultBundle = false + } + if imageBundleLen == 1 && !defaultBundle { + defaultBundle = true + } + + imageBundle := ybaclient.ImageBundle{ + Name: util.GetStringPointer(bundle["name"]), + UseAsDefault: util.GetBoolPointer(defaultBundle), + Details: &ybaclient.ImageBundleDetails{ + Arch: util.GetStringPointer(bundle["arch"]), + GlobalYbImage: util.GetStringPointer(bundle["machine-image"]), + SshUser: util.GetStringPointer(bundle["ssh-user"]), + SshPort: util.GetInt32Pointer(int32(sshPort)), + }, + } + providerImageBundles = append(providerImageBundles, imageBundle) + } + return providerImageBundles +} diff --git a/managed/yba-cli/cmd/provider/providerutil/updateproviderutil.go b/managed/yba-cli/cmd/provider/providerutil/updateproviderutil.go index 0a630c1b0ec0..9db904cdc7ef 100644 --- a/managed/yba-cli/cmd/provider/providerutil/updateproviderutil.go +++ b/managed/yba-cli/cmd/provider/providerutil/updateproviderutil.go @@ -218,12 +218,6 @@ func BuildRegionMapFromString( } else { ValueNotFoundForKeyError(key) } - case "yb-image": - if len(strings.TrimSpace(val)) != 0 { - region["yb-image"] = val - } else { - ValueNotFoundForKeyError(key) - } case "shared-subnet": if len(strings.TrimSpace(val)) != 0 { region["shared-subnet"] = val @@ -242,6 +236,18 @@ func BuildRegionMapFromString( } else { ValueNotFoundForKeyError(key) } + case "network-rg": + if len(strings.TrimSpace(val)) != 0 { + region["network-rg"] = val + } else { + ValueNotFoundForKeyError(key) + } + case "rg": + if len(strings.TrimSpace(val)) != 0 { + region["rg"] = val + } else 
{ + ValueNotFoundForKeyError(key) + } case "latitude": if len(strings.TrimSpace(val)) != 0 { region["latitude"] = val @@ -312,3 +318,176 @@ func BuildRegionMapFromString( } return region } + +// BuildImageBundleMapFromString is to process image bundle flags +func BuildImageBundleMapFromString( + imageBundleString, operation string, +) map[string]string { + imageBundle := map[string]string{} + for _, ibInfo := range strings.Split(imageBundleString, ",") { + kvp := strings.Split(ibInfo, "=") + if len(kvp) != 2 { + logrus.Fatalln( + formatter.Colorize("Incorrect format in image bundle description.\n", + formatter.RedColor)) + } + key := kvp[0] + val := kvp[1] + switch key { + case "image-bundle-name": + if len(strings.TrimSpace(val)) != 0 { + imageBundle["name"] = val + } else { + ValueNotFoundForKeyError(key) + } + case "image-bundle-uuid": + if len(strings.TrimSpace(val)) != 0 { + imageBundle["uuid"] = val + } else { + ValueNotFoundForKeyError(key) + } + case "machine-image": + if len(strings.TrimSpace(val)) != 0 { + imageBundle["machine-image"] = val + } else { + ValueNotFoundForKeyError(key) + } + case "arch": + if len(strings.TrimSpace(val)) != 0 { + imageBundle["arch"] = val + } else { + ValueNotFoundForKeyError(key) + } + case "ssh-user": + if len(strings.TrimSpace(val)) != 0 { + imageBundle["ssh-user"] = val + } else { + ValueNotFoundForKeyError(key) + } + case "ssh-port": + if len(strings.TrimSpace(val)) != 0 { + imageBundle["ssh-port"] = val + } else { + ValueNotFoundForKeyError(key) + } + case "imdsv2": + if len(strings.TrimSpace(val)) != 0 { + imageBundle["imdsv2"] = val + } else { + ValueNotFoundForKeyError(key) + } + case "default": + if len(strings.TrimSpace(val)) != 0 { + imageBundle["default"] = val + } else { + ValueNotFoundForKeyError(key) + } + } + } + if strings.Compare(operation, "add") == 0 { + if _, ok := imageBundle["name"]; !ok { + logrus.Fatalln( + formatter.Colorize( + "Name not specified in image bundle.\n", + formatter.RedColor)) + } + } 
else if strings.Compare(operation, "edit") == 0 { + if _, ok := imageBundle["uuid"]; !ok { + logrus.Fatalln( + formatter.Colorize( + "Image bundle uuid not specified in image bundle.\n", + formatter.RedColor)) + } + } + + return imageBundle +} + +// BuildImageBundleRegionOverrideMapFromString is to process image bundle flags +func BuildImageBundleRegionOverrideMapFromString( + regionString, operation string, +) map[string]string { + override := map[string]string{} + for _, o := range strings.Split(regionString, ",") { + kvp := strings.Split(o, "=") + if len(kvp) != 2 { + logrus.Fatalln( + formatter.Colorize("Incorrect format in image bundle description.\n", + formatter.RedColor)) + } + key := kvp[0] + val := kvp[1] + switch key { + case "image-bundle-name": + if len(strings.TrimSpace(val)) != 0 { + override["name"] = val + } else { + ValueNotFoundForKeyError(key) + } + case "image-bundle-uuid": + if len(strings.TrimSpace(val)) != 0 { + override["uuid"] = val + } else { + ValueNotFoundForKeyError(key) + } + case "machine-image": + if len(strings.TrimSpace(val)) != 0 { + override["machine-image"] = val + } else { + ValueNotFoundForKeyError(key) + } + case "region-name": + if len(strings.TrimSpace(val)) != 0 { + override["region-name"] = val + } else { + ValueNotFoundForKeyError(key) + } + } + } + if strings.Compare(operation, "add") == 0 { + if _, ok := override["name"]; !ok { + logrus.Fatalln( + formatter.Colorize( + "Name not specified in image bundle region override.\n", + formatter.RedColor)) + } + } else if strings.Compare(operation, "edit") == 0 { + if _, ok := override["uuid"]; !ok { + logrus.Fatalln( + formatter.Colorize( + "Image bundle uuid not specified in image bundle region override.\n", + formatter.RedColor)) + } + } + if _, ok := override["machine-image"]; !ok { + logrus.Fatalln( + formatter.Colorize( + "Machine image not specified in image bundle region override.\n", + formatter.RedColor)) + } + if _, ok := override["region-name"]; !ok { + 
logrus.Fatalln( + formatter.Colorize( + "Region Name not specified in image bundle region override.\n", + formatter.RedColor)) + } + + return override +} + +// DefaultImageBundleValues is to set default values for image bundle +func DefaultImageBundleValues(imageBundle map[string]string) map[string]string { + if _, ok := imageBundle["ssh-port"]; !ok { + imageBundle["ssh-port"] = "22" + } + if _, ok := imageBundle["default"]; !ok { + imageBundle["default"] = "false" + } + if _, ok := imageBundle["imdsv2"]; !ok { + imageBundle["imdsv2"] = "false" + } + if _, ok := imageBundle["arch"]; !ok { + imageBundle["arch"] = util.X86_64 + } + return imageBundle +} diff --git a/managed/yba-cli/cmd/util/util.go b/managed/yba-cli/cmd/util/util.go index ff93cd0602ba..793ee7b5dae5 100644 --- a/managed/yba-cli/cmd/util/util.go +++ b/managed/yba-cli/cmd/util/util.go @@ -364,3 +364,17 @@ func YAMLtoString(filePath string) string { func IsOutputType(t string) bool { return viper.GetString("output") == t } + +// RemoveComponentFromSlice removes the component from the slice +func RemoveComponentFromSlice(sliceInterface interface{}, index int) interface{} { + slice := sliceInterface.([]interface{}) + length := len(slice) + for i := range slice { + if i == index && i != length-1 { + return append(slice[:i], slice[i+1:]...) + } else if i == length-1 { + return slice[:i] + } + } + return slice +} diff --git a/managed/yba-cli/docs/yba_provider_aws_create.md b/managed/yba-cli/docs/yba_provider_aws_create.md index fa1604407a4c..38ab6aac8143 100644 --- a/managed/yba-cli/docs/yba_provider_aws_create.md +++ b/managed/yba-cli/docs/yba_provider_aws_create.md @@ -23,19 +23,19 @@ yba provider aws create -n \ ### Options ``` - --access-key-id string AWS Access Key ID. Required for non IAM role based providers. Can also be set using environment variable AWS_ACCESS_KEY_ID. - --secret-access-key string AWS Secret Access Key. Required for non IAM role based providers. 
Can also be set using environment variable AWS_SECRET_ACCESS_KEY. - --use-iam-instance-profile [Optional] Use IAM Role from the YugabyteDB Anywhere Host. Provider creation will fail on insufficient permissions on the host. (default false) - --hosted-zone-id string [Optional] Hosted Zone ID corresponding to Amazon Route53. - --region stringArray [Required] Region associated with the AWS provider. Minimum number of required regions = 1. Provide the following comma separated fields as key-value pairs:"region-name=,vpc-id=,sg-id=,arch=,yb-image=". Region name is required key-value. VPC ID, Security Group ID, YB Image (AMI) and Architecture (Default to x86_84) are optional. Each region needs to be added using a separate --region flag. Example: --region region-name=us-west-2,vpc-id=,sg-id= --region region-name=us-east-2,vpc-id=,sg-id= - --zone stringArray [Required] Zone associated to the AWS Region defined. Provide the following comma separated fields as key-value pairs:"zone-name=,region-name=,subnet=,secondary-subnet=". Zone name, Region name and subnet IDs are required values. Secondary subnet ID is optional. Each --region definition must have atleast one corresponding --zone definition. Multiple --zone definitions can be provided per region.Each zone needs to be added using a separate --zone flag. Example: --zone zone-name=us-west-2a,region-name=us-west-2,subnet= --zone zone-name=us-west-2b,region-name=us-west-2,subnet= - --ssh-user string [Optional] SSH User to access the YugabyteDB nodes. (default "ec2-user") - --ssh-port int [Optional] SSH Port to access the YugabyteDB nodes. (default 22) - --custom-ssh-keypair-name string [Optional] Provide custom key pair name to access YugabyteDB nodes. If left empty, YugabyteDB Anywhere will generate key pairs to access YugabyteDB nodes. - --custom-ssh-keypair-file-path string [Optional] Provide custom key pair file path to access YugabyteDB nodes. Required with --custom-ssh-keypair-name. 
- --airgap-install [Optional] Are YugabyteDB nodes installed in an air-gapped environment, lacking access to the public internet for package downloads. (default false) - --ntp-servers stringArray [Optional] List of NTP Servers. Can be provided as separate flags or as comma-separated values. - -h, --help help for create + --access-key-id string AWS Access Key ID. Required for non IAM role based providers. Can also be set using environment variable AWS_ACCESS_KEY_ID. + --secret-access-key string AWS Secret Access Key. Required for non IAM role based providers. Can also be set using environment variable AWS_SECRET_ACCESS_KEY. + --use-iam-instance-profile [Optional] Use IAM Role from the YugabyteDB Anywhere Host. Provider creation will fail on insufficient permissions on the host. (default false) + --hosted-zone-id string [Optional] Hosted Zone ID corresponding to Amazon Route53. + --region stringArray [Required] Region associated with the AWS provider. Minimum number of required regions = 1. Provide the following comma separated fields as key-value pairs:"region-name=,vpc-id=,sg-id=". Region name is required key-value. VPC ID and Security Group ID are optional. Each region needs to be added using a separate --region flag. Example: --region region-name=us-west-2,vpc-id=,sg-id= --region region-name=us-east-2,vpc-id=,sg-id= + --zone stringArray [Required] Zone associated to the AWS Region defined. Provide the following comma separated fields as key-value pairs:"zone-name=,region-name=,subnet=,secondary-subnet=". Zone name, Region name and subnet IDs are required values. Secondary subnet ID is optional. Each --region definition must have atleast one corresponding --zone definition. Multiple --zone definitions can be provided per region.Each zone needs to be added using a separate --zone flag. 
Example: --zone zone-name=us-west-2a,region-name=us-west-2,subnet= --zone zone-name=us-west-2b,region-name=us-west-2,subnet= + --image-bundle stringArray [Optional] Image bundles associated with AWS provider. Provide the following comma separated fields as key-value pairs: "image-bundle-name=,arch=,ssh-user=,ssh-port=,imdsv2=,default=". Image bundle name, architecture and SSH user are required key-value pairs. The default for SSH Port is 22, IMDSv2 (This should be true if the Image bundle requires Instance Metadata Service v2) is false. Default marks the image bundle as default for the provider. Allowed values for architecture are x86_64 and arm64.Each image bundle can be added using separate --image-bundle flag. Example: --image-bundle =,=,=22 + --image-bundle-region-override stringArray [Optional] Image bundle region overrides associated with AWS provider. Provide the following comma separated fields as key-value pairs: "image-bundle-name=,region-name=,machine-image=". All are required key-value pairs. Each --image-bundle definition must have atleast one corresponding --image-bundle-region-override definition for every region added. Each image bundle can be added using separate --image-bundle-region-override flag. Example: --image-bundle =,=,= + --custom-ssh-keypair-name string [Optional] Provide custom key pair name to access YugabyteDB nodes. If left empty, YugabyteDB Anywhere will generate key pairs to access YugabyteDB nodes. + --custom-ssh-keypair-file-path string [Optional] Provide custom key pair file path to access YugabyteDB nodes. Required with --custom-ssh-keypair-name. + --airgap-install [Optional] Are YugabyteDB nodes installed in an air-gapped environment, lacking access to the public internet for package downloads. (default false) + --ntp-servers stringArray [Optional] List of NTP Servers. Can be provided as separate flags or as comma-separated values. 
+ -h, --help help for create ``` ### Options inherited from parent commands diff --git a/managed/yba-cli/docs/yba_provider_aws_update.md b/managed/yba-cli/docs/yba_provider_aws_update.md index 68aa8574ff78..e7d2d34ab5b6 100644 --- a/managed/yba-cli/docs/yba_provider_aws_update.md +++ b/managed/yba-cli/docs/yba_provider_aws_update.md @@ -13,22 +13,25 @@ yba provider aws update [flags] ### Options ``` - --new-name string [Optional] Updating provider name. - --access-key-id string [Optional] AWS Access Key ID. Required if provider does not use IAM instance profile. Required with secret-access-key. - --secret-access-key string [Optional] AWS Secret Access Key. Required if provider does not use IAM instance profile. Required with access-key-id. - --use-iam-instance-profile [Optional] Use IAM Role from the YugabyteDB Anywhere Host. - --hosted-zone-id string [Optional] Updating Hosted Zone ID corresponding to Amazon Route53. - --add-region stringArray [Optional] Add region associated with the AWS provider. Provide the following comma separated fields as key-value pairs:"region-name=,vpc-id=,sg-id=,arch=". Region name is required key-value. VPC ID, Security Group ID and Architecture are optional. Each region needs to be added using a separate --add-region flag. - --add-zone stringArray [Optional] Zone associated to the AWS Region defined. Provide the following comma separated fields as key-value pairs:"zone-name=,region-name=,subnet=,secondary-subnet=". Zone name, Region name and subnet IDs are required values. Secondary subnet ID is optional. Each --add-region definition must have atleast one corresponding --add-zone definition. Multiple --add-zone definitions can be provided for new and existing regions.Each zone needs to be added using a separate --add-zone flag. - --remove-region stringArray [Optional] Region name to be removed from the provider. Each region to be removed needs to be provided using a separate --remove-region definition. 
Removing a region removes the corresponding zones. - --remove-zone stringArray [Optional] Remove zone associated to the AWS Region defined. Provide the following comma separated fields as key-value pairs:"zone-name=,region-name=". Zone name, Region name are required values. Each zone needs to be removed using a separate --remove-zone flag. - --edit-region stringArray [Optional] Edit region details associated with the AWS provider. Provide the following comma separated fields as key-value pairs:"region-name=,vpc-id=,sg-id=". Region name is a required key-value pair. VPC ID and Security Group ID are optional. Each region needs to be modified using a separate --edit-region flag. - --edit-zone stringArray [Optional] Edit zone associated to the AWS Region defined. Provide the following comma separated fields as key-value pairs:"zone-name=,region-name=,subnet=,secondary-subnet=". Zone name, Region name are required values. Subnet IDs and Secondary subnet ID is optional. Each zone needs to be modified using a separate --edit-zone flag. - --ssh-user string [Optional] Updating SSH User to access the YugabyteDB nodes. - --ssh-port int [Optional] Updating SSH Port to access the YugabyteDB nodes. - --airgap-install [Optional] Are YugabyteDB nodes installed in an air-gapped environment, lacking access to the public internet for package downloads. (default false) - --ntp-servers stringArray [Optional] List of NTP Servers. Can be provided as separate flags or as comma-separated values. - -h, --help help for update + --new-name string [Optional] Updating provider name. + --access-key-id string [Optional] AWS Access Key ID. Required if provider does not use IAM instance profile. Required with secret-access-key. + --secret-access-key string [Optional] AWS Secret Access Key. Required if provider does not use IAM instance profile. Required with access-key-id. + --use-iam-instance-profile [Optional] Use IAM Role from the YugabyteDB Anywhere Host. 
+ --hosted-zone-id string [Optional] Updating Hosted Zone ID corresponding to Amazon Route53. + --add-region stringArray [Optional] Add region associated with the AWS provider. Provide the following comma separated fields as key-value pairs:"region-name=,vpc-id=,sg-id=". Region name is required key-value. VPC ID and Security Group ID are optional. Each region needs to be added using a separate --add-region flag. + --add-zone stringArray [Optional] Zone associated to the AWS Region defined. Provide the following comma separated fields as key-value pairs:"zone-name=,region-name=,subnet=,secondary-subnet=". Zone name, Region name and subnet IDs are required values. Secondary subnet ID is optional. Each --add-region definition must have atleast one corresponding --add-zone definition. Multiple --add-zone definitions can be provided for new and existing regions.Each zone needs to be added using a separate --add-zone flag. + --remove-region stringArray [Optional] Region name to be removed from the provider. Each region to be removed needs to be provided using a separate --remove-region definition. Removing a region removes the corresponding zones. + --remove-zone stringArray [Optional] Remove zone associated to the AWS Region defined. Provide the following comma separated fields as key-value pairs:"zone-name=,region-name=". Zone name, Region name are required values. Each zone needs to be removed using a separate --remove-zone flag. + --edit-region stringArray [Optional] Edit region details associated with the AWS provider. Provide the following comma separated fields as key-value pairs:"region-name=,vpc-id=,sg-id=". Region name is a required key-value pair. VPC ID and Security Group ID are optional. Each region needs to be modified using a separate --edit-region flag. + --edit-zone stringArray [Optional] Edit zone associated to the AWS Region defined. Provide the following comma separated fields as key-value pairs:"zone-name=,region-name=,subnet=,secondary-subnet=". 
Zone name, Region name are required values. Subnet IDs and Secondary subnet ID is optional. Each zone needs to be modified using a separate --edit-zone flag. + --add-image-bundle stringArray [Optional] Add Image bundles associated with the provider. Provide the following comma separated fields as key-value pairs: "image-bundle-name=,arch=,ssh-user=,ssh-port=,imdsv2=,default=". Image bundle name, architecture and SSH user are required key-value pairs. The default for SSH Port is 22, IMDSv2 (This should be true if the Image bundle requires Instance Metadata Service v2) is false. Default marks the image bundle as default for the provider. Allowed values for architecture are x86_64 and arm64.Each image bundle can be added using separate --image-bundle flag. Example: --add-image-bundle =,=,=22 + --add-image-bundle-region-override stringArray [Optional] Add Image bundle region overrides associated with the provider. Provide the following comma separated fields as key-value pairs: "image-bundle-name=,region-name=,machine-image=". All are required key-value pairs. Each --image-bundle definition must have atleast one corresponding --image-bundle-region-override definition for every region added. Each override can be added using separate --image-bundle-region-override flag. Example: --add-image-bundle-region-override =,=,= + --edit-image-bundle stringArray [Optional] Edit Image bundles associated with the provider. Provide the following comma separated fields as key-value pairs: "image-bundle-uuid=,ssh-user=,ssh-port=,imdsv2=,default=". Image bundle UUID is a required key-value pair. Each image bundle can be edited using separate --image-bundle flag. Example: --edit-image-bundle =,=,=22 + --edit-image-bundle-region-override stringArray [Optional] Edit overrides of the region associated with the provider. Provide the following comma separated fields as key-value pairs: "image-bundle-uuid=,region-name=,machine-image=". All are required key-value pairs. 
Each image bundle can be added using separate --image-bundle-region-override flag. Example: --edit-image-bundle-region-override =,=,= + --remove-image-bundle stringArray [Optional] Image bundle UUID to be removed from the provider. Each bundle to be removed needs to be provided using a separate --remove-image-bundle definition. Removing a image bundle removes the corresponding region overrides. + --airgap-install [Optional] Are YugabyteDB nodes installed in an air-gapped environment, lacking access to the public internet for package downloads. (default false) + --ntp-servers stringArray [Optional] List of NTP Servers. Can be provided as separate flags or as comma-separated values. + -h, --help help for update ``` ### Options inherited from parent commands diff --git a/managed/yba-cli/docs/yba_provider_azure_create.md b/managed/yba-cli/docs/yba_provider_azure_create.md index 8cfc3156ce67..050131a703d0 100644 --- a/managed/yba-cli/docs/yba_provider_azure_create.md +++ b/managed/yba-cli/docs/yba_provider_azure_create.md @@ -33,10 +33,9 @@ yba provider azure create [flags] --network-subscription-id string Azure Network Subscription ID. --network-rg string Azure Network Resource Group. --hosted-zone-id string [Optional] Hosted Zone ID corresponging to Private DNS Zone. - --region stringArray [Required] Region associated with the Azure provider. Minimum number of required regions = 1. Provide the following comma separated fields as key-value pairs:"region-name=,vnet=,sg-id=,yb-image=". Region name and Virtual network are required key-values. Security Group ID and YB Image (AMI) are optional. Each region needs to be added using a separate --region flag. Example: --region region-name=westus2,vnet= + --region stringArray [Required] Region associated with the Azure provider. Minimum number of required regions = 1. Provide the following comma separated fields as key-value pairs:"region-name=,vnet=,sg-id=,rg=,network-rg=". 
Region name and Virtual network are required key-values. Security Group ID, Resource Group (override for this region) and Network Resource Group (override for this region) are optional. Each region needs to be added using a separate --region flag. Example: --region region-name=westus2,vnet= --zone stringArray [Required] Zone associated to the Azure Region defined. Provide the following comma separated fields as key-value pairs:"zone-name=,region-name=,subnet=".Zone name, Region name and subnet IDs are required values. Secondary subnet ID is optional. Each --region definition must have atleast one corresponding --zone definition. Multiple --zone definitions can be provided per region.Each zone needs to be added using a separate --zone flag. Example: --zone zone-name=westus2-1,region-name=westus2,subnet= - --ssh-user string [Optional] SSH User to access the YugabyteDB nodes. (default "centos") - --ssh-port int [Optional] SSH Port to access the YugabyteDB nodes. (default 22) + --image-bundle stringArray [Optional] Intel x86_64 image bundles associated with Azure provider. Provide the following comma separated fields as key-value pairs: "image-bundle-name=,machine-image=,ssh-user=,ssh-port=,default=". Image bundle name, machine image and SSH user are required key-value pairs. The default SSH Port is 22. Default marks the image bundle as default for the provider. Each image bundle can be added using separate --image-bundle flag. Example: --image-bundle =,machine-image=,=,=22 --custom-ssh-keypair-name string [Optional] Provide custom key pair name to access YugabyteDB nodes. If left empty, YugabyteDB Anywhere will generate key pairs to access YugabyteDB nodes. --custom-ssh-keypair-file-path string [Optional] Provide custom key pair file path to access YugabyteDB nodes. Required with --custom-ssh-keypair-name. --airgap-install [Optional] Are YugabyteDB nodes installed in an air-gapped environment, lacking access to the public internet for package downloads. 
(default false) diff --git a/managed/yba-cli/docs/yba_provider_azure_update.md b/managed/yba-cli/docs/yba_provider_azure_update.md index a1534f383a03..c0f1b0aaf30f 100644 --- a/managed/yba-cli/docs/yba_provider_azure_update.md +++ b/managed/yba-cli/docs/yba_provider_azure_update.md @@ -13,26 +13,27 @@ yba provider azure update [flags] ### Options ``` - --new-name string [Optional] Updating provider name. - --client-id string [Optional] Update Azure Client ID. Required with client-secret, tenant-id, subscription-id, rg - --client-secret string [Optional] Update Azure Client Secret. Required with client-id, tenant-id, subscription-id, rg - --tenant-id string [Optional] Update Azure Tenant ID. Required with client-secret, client-id, subscription-id, rg - --subscription-id string [Optional] Update Azure Subscription ID. Required with client-id, client-secret, tenant-id, rg - --rg string [Optional] Update Azure Resource Group. Required with client-id, client-secret, tenant-id, subscription-id - --network-subscription-id string [Optional] Update Azure Network Subscription ID. - --network-rg string [Optional] Update Azure Network Resource Group. - --hosted-zone-id string [Optional] Update Hosted Zone ID corresponging to Private DNS Zone. - --add-region stringArray [Optional] Add region associated with the Azure provider. Provide the following comma separated fields as key-value pairs:"region-name=,vnet=,sg-id=,yb-image=". Region name and Virtual network are required key-values. Security Group ID and YB Image (AMI) are optional. Each region needs to be added using a separate --add-region flag. - --add-zone stringArray [Optional] Zone associated to the Azure Region defined. Provide the following comma separated fields as key-value pairs:"zone-name=,region-name=,subnet=".Zone name, Region name and subnet IDs are required values. Secondary subnet ID is optional. Each --add-region definition must have atleast one corresponding --add-zone definition. 
Multiple --add-zone definitions can be provided per region.Each zone needs to be added using a separate --add-zone flag. - --remove-region stringArray [Optional] Region name to be removed from the provider. Each region to be removed needs to be provided using a separate --remove-region definition. Removing a region removes the corresponding zones. - --remove-zone stringArray [Optional] Remove zone associated to the Azure Region defined. Provide the following comma separated fields as key-value pairs:"zone-name=,region-name=". Zone name, Region name are required values. Each zone needs to be removed using a separate --remove-zone flag. - --edit-region stringArray [Optional] Edit region details associated with the Azure provider. Provide the following comma separated fields as key-value pairs:"region-name=,vnet=,sg-id=,yb-image=". Region name is a required key-value pair. Virtual network and Security Group ID are optional. Each region needs to be modified using a separate --edit-region flag. - --edit-zone stringArray [Optional] Edit zone associated to the Azure Region defined. Provide the following comma separated fields as key-value pairs:"zone-name=,region-name=,subnet=,secondary-subnet=". Zone name, Region name are required values. Subnet IDs and Secondary subnet ID is optional. Each zone needs to be modified using a separate --edit-zone flag. - --ssh-user string [Optional] Updating SSH User to access the YugabyteDB nodes. - --ssh-port int [Optional] Updating SSH Port to access the YugabyteDB nodes. - --airgap-install [Optional] Are YugabyteDB nodes installed in an air-gapped environment, lacking access to the public internet for package downloads. (default false) - --ntp-servers stringArray [Optional] List of NTP Servers. Can be provided as separate flags or as comma-separated values. - -h, --help help for update + --new-name string [Optional] Updating provider name. + --client-id string [Optional] Update Azure Client ID. 
Required with client-secret, tenant-id, subscription-id, rg + --client-secret string [Optional] Update Azure Client Secret. Required with client-id, tenant-id, subscription-id, rg + --tenant-id string [Optional] Update Azure Tenant ID. Required with client-secret, client-id, subscription-id, rg + --subscription-id string [Optional] Update Azure Subscription ID. Required with client-id, client-secret, tenant-id, rg + --rg string [Optional] Update Azure Resource Group. Required with client-id, client-secret, tenant-id, subscription-id + --network-subscription-id string [Optional] Update Azure Network Subscription ID. + --network-rg string [Optional] Update Azure Network Resource Group. + --hosted-zone-id string [Optional] Update Hosted Zone ID corresponging to Private DNS Zone. + --add-region stringArray [Optional] Add region associated with the Azure provider. Provide the following comma separated fields as key-value pairs:"region-name=,vnet=,sg-id=". Region name and Virtual network are required key-values. Security Group ID is optional. Each region needs to be added using a separate --add-region flag. + --add-zone stringArray [Optional] Zone associated to the Azure Region defined. Provide the following comma separated fields as key-value pairs:"zone-name=,region-name=,subnet=".Zone name, Region name and subnet IDs are required values. Secondary subnet ID is optional. Each --add-region definition must have atleast one corresponding --add-zone definition. Multiple --add-zone definitions can be provided per region.Each zone needs to be added using a separate --add-zone flag. + --remove-region stringArray [Optional] Region name to be removed from the provider. Each region to be removed needs to be provided using a separate --remove-region definition. Removing a region removes the corresponding zones. + --remove-zone stringArray [Optional] Remove zone associated to the Azure Region defined. 
Provide the following comma separated fields as key-value pairs:"zone-name=,region-name=". Zone name, Region name are required values. Each zone needs to be removed using a separate --remove-zone flag. + --edit-region stringArray [Optional] Edit region details associated with the Azure provider. Provide the following comma separated fields as key-value pairs:"region-name=,vnet=,sg-id=". Region name is a required key-value pair. Virtual network and Security Group ID are optional. Each region needs to be modified using a separate --edit-region flag. + --edit-zone stringArray [Optional] Edit zone associated to the Azure Region defined. Provide the following comma separated fields as key-value pairs:"zone-name=,region-name=,subnet=,secondary-subnet=". Zone name, Region name are required values. Subnet IDs and Secondary subnet ID is optional. Each zone needs to be modified using a separate --edit-zone flag. + --add-image-bundle stringArray [Optional] Add Intel x86_64 image bundles associated with the provider. Provide the following comma separated fields as key-value pairs: "image-bundle-name=,machine-image=,ssh-user=,ssh-port=,default=". Image bundle name, machine image and SSH user are required key-value pairs. The default SSH Port is 22. Default marks the image bundle as default for the provider. Each image bundle can be added using separate --image-bundle flag. Example: --add-image-bundle =,machine-image=,=,=22 + --edit-image-bundle stringArray [Optional] Edit Intel x86_64 image bundles associated with the provider. Provide the following comma separated fields as key-value pairs: "image-bundle-uuid=,machine-image=,ssh-user=,ssh-port=,default=". Image bundle UUID is a required key-value pair.Each image bundle can be added using separate --image-bundle flag. Example: --edit-image-bundle =,machine-image=,=,=22 + --remove-image-bundle stringArray [Optional] Image bundle UUID to be removed from the provider. 
Each bundle to be removed needs to be provided using a separate --remove-image-bundle definition. + --airgap-install [Optional] Are YugabyteDB nodes installed in an air-gapped environment, lacking access to the public internet for package downloads. (default false) + --ntp-servers stringArray [Optional] List of NTP Servers. Can be provided as separate flags or as comma-separated values. + -h, --help help for update ``` ### Options inherited from parent commands diff --git a/managed/yba-cli/docs/yba_provider_gcp_create.md b/managed/yba-cli/docs/yba_provider_gcp_create.md index 1a399643f6ca..f729f0222da3 100644 --- a/managed/yba-cli/docs/yba_provider_gcp_create.md +++ b/managed/yba-cli/docs/yba_provider_gcp_create.md @@ -31,9 +31,8 @@ yba provider gcp create -n dkumar-cli \ --use-host-vpc [Optional] Using VPC from YugabyteDB Anywhere Host. If set to false, specify an exsiting VPC using --network. Ignored if create-vpc is set. (default false) --project-id string [Optional] Project ID that hosts universe nodes in GCP. --shared-vpc-project-id string [Optional] Shared VPC project ID in GCP. - --region stringArray [Required] Region associated with the GCP provider. Minimum number of required regions = 1. Provide the following comma separated fields as key-value pairs:"region-name=,shared-subnet=,yb-image=,instance-template=". Region name and Shared subnet are required key-value pairs. YB Image (AMI) and Instance Template are optional. Each region can be added using separate --region flags. Example: --region region-name=us-west1,shared-subnet= - --ssh-user string [Optional] SSH User to access the YugabyteDB nodes. (default "centos") - --ssh-port int [Optional] SSH Port to access the YugabyteDB nodes. (default 22) + --region stringArray [Required] Region associated with the GCP provider. Minimum number of required regions = 1. Provide the following comma separated fields as key-value pairs: "region-name=,shared-subnet=,instance-template=". 
Region name and Shared subnet are required key-value pairs. YB Image (AMI) and Instance Template are optional. Each region can be added using separate --region flags. Example: --region region-name=us-west1,shared-subnet= + --image-bundle stringArray [Optional] Intel x86_64 image bundles associated with GCP provider. Provide the following comma separated fields as key-value pairs: "image-bundle-name=,machine-image=,ssh-user=,ssh-port=,default=". Image bundle name, machine image and SSH user are required key-value pairs. The default SSH Port is 22. Default marks the image bundle as default for the provider. Each image bundle can be added using separate --image-bundle flag. Example: --image-bundle =,machine-image=,=,=22 --custom-ssh-keypair-name string [Optional] Provide custom key pair name to access YugabyteDB nodes. If left empty, YugabyteDB Anywhere will generate key pairs to access YugabyteDB nodes. --custom-ssh-keypair-file-path string [Optional] Provide custom key pair file path to access YugabyteDB nodes. Required with --custom-ssh-keypair-name. --airgap-install [Optional] Are YugabyteDB nodes installed in an air-gapped environment, lacking access to the public internet for package downloads. (default false) diff --git a/managed/yba-cli/docs/yba_provider_gcp_update.md b/managed/yba-cli/docs/yba_provider_gcp_update.md index 554f9d3a4ad5..139ba4718f1c 100644 --- a/managed/yba-cli/docs/yba_provider_gcp_update.md +++ b/managed/yba-cli/docs/yba_provider_gcp_update.md @@ -13,23 +13,24 @@ yba provider gcp update [flags] ### Options ``` - --new-name string [Optional] Updating provider name. - --use-host-credentials [Optional] Enabling YugabyteDB Anywhere Host credentials in GCP. Explicitly mark as false to disable on a provider made with host credentials. - --credentials string [Optional] GCP Service Account credentials file path. Required for providers not using host credentials. - --network string [Optional] Update Custom GCE network name. 
Required if create-vpc is true or use-host-vpc is false. - --yb-firewall-tags string [Optional] Update tags for firewall rules in GCP. - --create-vpc [Optional] Creating a new VPC network in GCP (Beta Feature). Specify VPC name using --network. - --use-host-vpc [Optional] Using VPC from YugabyteDB Anywhere Host. If set to false, specify an exsiting VPC using --network. Ignored if create-vpc is set. - --project-id string [Optional] Update project ID that hosts universe nodes in GCP. - --shared-vpc-project-id string [Optional] Update shared VPC project ID in GCP. - --add-region stringArray [Required] Region associated with the GCP provider. Minimum number of required regions = 1. Provide the following comma separated fields as key-value pairs:"region-name=,shared-subnet=,yb-image=,instance-template=". Region name and Shared subnet are required key-value pairs. YB Image (AMI) and Instance Template are optional. Each region can be added using separate --add-region flags. - --edit-region stringArray [Optional] Edit region details associated with the GCP provider. Provide the following comma separated fields as key-value pairs:"region-name=,shared-subnet=,yb-image=,instance-template=". Region name is a required key-value pair. Shared subnet, YB Image (AMI) and Instance Template are optional. Each region needs to be modified using a separate --edit-region flag. - --remove-region stringArray [Optional] Region name to be removed from the provider. Each region to be removed needs to be provided using a separate --remove-region definition. Removing a region removes the corresponding zones. - --ssh-user string [Optional] Updating SSH User to access the YugabyteDB nodes. - --ssh-port int [Optional] Updating SSH Port to access the YugabyteDB nodes. - --airgap-install [Optional] Are YugabyteDB nodes installed in an air-gapped environment, lacking access to the public internet for package downloads. - --ntp-servers stringArray [Optional] List of NTP Servers. 
Can be provided as separate flags or as comma-separated values. - -h, --help help for update + --new-name string [Optional] Updating provider name. + --use-host-credentials [Optional] Enabling YugabyteDB Anywhere Host credentials in GCP. Explicitly mark as false to disable on a provider made with host credentials. + --credentials string [Optional] GCP Service Account credentials file path. Required for providers not using host credentials. + --network string [Optional] Update Custom GCE network name. Required if create-vpc is true or use-host-vpc is false. + --yb-firewall-tags string [Optional] Update tags for firewall rules in GCP. + --create-vpc [Optional] Creating a new VPC network in GCP (Beta Feature). Specify VPC name using --network. + --use-host-vpc [Optional] Using VPC from YugabyteDB Anywhere Host. If set to false, specify an exsiting VPC using --network. Ignored if create-vpc is set. + --project-id string [Optional] Update project ID that hosts universe nodes in GCP. + --shared-vpc-project-id string [Optional] Update shared VPC project ID in GCP. + --add-region stringArray [Required] Region associated with the GCP provider. Minimum number of required regions = 1. Provide the following comma separated fields as key-value pairs:"region-name=,shared-subnet=,instance-template=". Region name and Shared subnet are required key-value pairs. Instance Template is optional. Each region can be added using separate --add-region flags. + --edit-region stringArray [Optional] Edit region details associated with the GCP provider. Provide the following comma separated fields as key-value pairs:"region-name=,shared-subnet=,instance-template=". Region name is a required key-value pair. Shared subnet and Instance Template are optional. Each region needs to be modified using a separate --edit-region flag. + --remove-region stringArray [Optional] Region name to be removed from the provider. 
Each region to be removed needs to be provided using a separate --remove-region definition. Removing a region removes the corresponding zones. + --add-image-bundle stringArray [Optional] Add Intel x86_64 image bundles associated with the provider. Provide the following comma separated fields as key-value pairs: "image-bundle-name=,machine-image=,ssh-user=,ssh-port=,default=". Image bundle name, machine image and SSH user are required key-value pairs. The default SSH Port is 22. Default marks the image bundle as default for the provider. Each image bundle can be added using separate --image-bundle flag. Example: --add-image-bundle =,machine-image=,=,=22 + --edit-image-bundle stringArray [Optional] Edit Intel x86_64 image bundles associated with the provider. Provide the following comma separated fields as key-value pairs: "image-bundle-uuid=,machine-image=,ssh-user=,ssh-port=,default=". Image bundle UUID is a required key-value pair.Each image bundle can be added using separate --image-bundle flag. Example: --edit-image-bundle =,machine-image=,=,=22 + --remove-image-bundle stringArray [Optional] Image bundle UUID to be removed from the provider. Each bundle to be removed needs to be provided using a separate --remove-image-bundle definition. + --airgap-install [Optional] Are YugabyteDB nodes installed in an air-gapped environment, lacking access to the public internet for package downloads. + --ntp-servers stringArray [Optional] List of NTP Servers. Can be provided as separate flags or as comma-separated values. 
+ -h, --help help for update ``` ### Options inherited from parent commands diff --git a/managed/yba-cli/internal/formatter/aws/aws.go b/managed/yba-cli/internal/formatter/aws/aws.go index b4483ebafe8c..6e764b235264 100644 --- a/managed/yba-cli/internal/formatter/aws/aws.go +++ b/managed/yba-cli/internal/formatter/aws/aws.go @@ -18,7 +18,7 @@ const ( "\t{{.HostedZoneName}}\t{{.VpcType}}" // Region provides header for AWS Region Cloud Info - Region = "table {{.Arch}}\t{{.SecurityGroupID}}\t{{.VNet}}\t{{.YbImage}}" + Region = "table {{.Arch}}\t{{.SecurityGroupID}}\t{{.VNet}}" // EAR1 for EAR listing EAR1 = "table {{.AccessKeyID}}\t{{.AccessKeySecret}}\t{{.EndPoint}}" @@ -40,7 +40,6 @@ const ( archHeader = "Arch" sgIDHeader = "Security Group ID" vnetHeader = "Virual Network" - ybImageHeader = "YB Image" endPointHeader = "EndPoint" cmkPolicyHeader = "CMK Policy" cmkIDHeader = "CMK ID" @@ -120,7 +119,6 @@ func NewRegionContext() *RegionContext { "Arch": archHeader, "SecurityGroupID": sgIDHeader, "VNet": vnetHeader, - "YbImage": ybImageHeader, } return &awsRegionCtx } @@ -184,11 +182,6 @@ func (c *RegionContext) VNet() string { return c.Region.GetVnet() } -// YbImage fetches AWS Region yb image -func (c *RegionContext) YbImage() string { - return c.Region.GetYbImage() -} - // MarshalJSON function func (c *RegionContext) MarshalJSON() ([]byte, error) { return json.Marshal(c.Region) diff --git a/managed/yba-cli/internal/formatter/azu/azu.go b/managed/yba-cli/internal/formatter/azu/azu.go index 91b5bf9b7cb7..e1cfbc454464 100644 --- a/managed/yba-cli/internal/formatter/azu/azu.go +++ b/managed/yba-cli/internal/formatter/azu/azu.go @@ -20,7 +20,7 @@ const ( Provider2 = "table {{.HostedZoneID}}\t{{.NetworkSubscriptionID}}\t{{.NetworkRG}}" + "\t{{.VpcType}}" // Region provides header for AZU Region Cloud Info - Region = "table {{.SecurityGroupID}}\t{{.VNet}}\t.{{.YbImage}}" + Region = "table {{.SecurityGroupID}}\t{{.VNet}}" // EAR1 provides header for Azure KMS Info EAR1 = 
"table {{.ClientID}}\t{{.ClientSecret}}\t{{.TenantID}}" @@ -39,7 +39,6 @@ const ( hostedZoneIDHeader = "Hosted Zone ID" sgIDHeader = "Security Group ID" vnetHeader = "Virual Network" - ybImageHeader = "YB Image" keyAlgorithmHeader = "Key Algorithm" @@ -127,7 +126,6 @@ func NewRegionContext() *RegionContext { azuRegionCtx.Header = formatter.SubHeaderContext{ "SecurityGroupID": sgIDHeader, "VNet": vnetHeader, - "YbImage": ybImageHeader, } return &azuRegionCtx } @@ -207,11 +205,6 @@ func (c *RegionContext) VNet() string { return c.Region.GetVnet() } -// YbImage fetches Azure Region yb image -func (c *RegionContext) YbImage() string { - return c.Region.GetYbImage() -} - // MarshalJSON function func (c *RegionContext) MarshalJSON() ([]byte, error) { return json.Marshal(c.Region) diff --git a/managed/yba-cli/internal/formatter/gcp/gcp.go b/managed/yba-cli/internal/formatter/gcp/gcp.go index 4d63c31cc22f..ba3d7460f6b9 100644 --- a/managed/yba-cli/internal/formatter/gcp/gcp.go +++ b/managed/yba-cli/internal/formatter/gcp/gcp.go @@ -19,7 +19,7 @@ const ( "\t{{.FirewallTags}}" // Region provides header for GCP Region Cloud Info - Region = "table {{.InstanceTemplate}}\t{{.YbImage}}" + Region = "table {{.InstanceTemplate}}" // EAR1 for EAR listing EAR1 = "table {{.LocationID}}\t{{.ProtectionLevel}}" @@ -34,7 +34,6 @@ const ( vpcTypeHeader = "VPC Type" ybFirewallTagsHeader = "YB Firewall Tags" instanceTemplateHeader = "Instance Template" - ybImageHeader = "YB Image" gcpConfigHeader = "GCP Config" locationIDHeader = "Location ID" protectionLevelHeader = "Protection Level" @@ -113,7 +112,6 @@ func NewRegionContext() *RegionContext { gcpRegionCtx := RegionContext{} gcpRegionCtx.Header = formatter.SubHeaderContext{ "InstanceTemplate": instanceTemplateHeader, - "YbImage": ybImageHeader, } return &gcpRegionCtx } @@ -157,11 +155,6 @@ func (c *RegionContext) InstanceTemplate() string { return c.Region.GetInstanceTemplate() } -// YbImage fetches the YB image -func (c *RegionContext) 
YbImage() string { - return c.Region.GetYbImage() -} - // MarshalJSON function func (c *RegionContext) MarshalJSON() ([]byte, error) { return json.Marshal(c.Region) diff --git a/managed/yba-cli/internal/formatter/provider/imagebundle.go b/managed/yba-cli/internal/formatter/provider/imagebundle.go index 38ebbb1aa1fd..67d95487a4ec 100644 --- a/managed/yba-cli/internal/formatter/provider/imagebundle.go +++ b/managed/yba-cli/internal/formatter/provider/imagebundle.go @@ -28,7 +28,7 @@ const ( activeHeader = "Active" globalYbImageHeader = "Global YB Image" archHeader = "Architecture" - useIMDSv2Header = "Use IMDS V2" + useIMDSv2Header = "Use IMDS V2 (AWS)" regionOverridesHeader = "Region Overrides" imageBundleTypeHeader = "Image Bundle Type" versionHeader = "Version" From 5f95ff9a9ee880a7d3261acace62364c940af5fb Mon Sep 17 00:00:00 2001 From: Amitanand Aiyer Date: Wed, 19 Jun 2024 12:03:52 +0800 Subject: [PATCH 74/75] [#23905] DocDB: Persistence for Master side Table/Object locks Summary: Implements persistence for master side DDL lock acquistion and release. Handles restoring/rebuilding the Lock state at the master after master-failover/restart. Handle bootstrapping the new TServer with the locks already taken. ---- Does not handle release of locks for a "dead" TServer. 
Jira: DB-12809 Test Plan: yb_build.sh --cxx-test object_lock-test Reviewers: bkolagani, zdrudi Reviewed By: zdrudi Subscribers: ybase Differential Revision: https://phorge.dev.yugabyte.com/D36794 --- src/yb/integration-tests/object_lock-test.cc | 121 ++++- src/yb/master/CMakeLists.txt | 2 +- src/yb/master/catalog_entity_info.h | 19 + src/yb/master/catalog_entity_info.proto | 23 + src/yb/master/catalog_entity_types.h | 3 +- src/yb/master/catalog_loaders.cc | 14 + src/yb/master/catalog_loaders.h | 1 + src/yb/master/catalog_manager.cc | 25 +- src/yb/master/catalog_manager.h | 13 +- src/yb/master/catalog_manager_ext.cc | 1 + src/yb/master/master_heartbeat.proto | 5 + src/yb/master/master_heartbeat_service.cc | 7 + src/yb/master/master_tablet_service.cc | 4 +- src/yb/master/master_types.proto | 1 + src/yb/master/mini_master.cc | 4 + src/yb/master/mini_master.h | 2 + src/yb/master/object_lock.cc | 294 ----------- src/yb/master/object_lock.h | 38 -- src/yb/master/object_lock_info_manager.cc | 506 +++++++++++++++++++ src/yb/master/object_lock_info_manager.h | 62 +++ src/yb/master/sys_catalog.cc | 2 +- src/yb/master/sys_catalog_writer.cc | 1 - src/yb/tserver/heartbeater.cc | 5 + src/yb/tserver/tablet_server.cc | 8 + src/yb/tserver/tablet_server.h | 1 + src/yb/tserver/ts_local_lock_manager-test.cc | 4 +- src/yb/tserver/ts_local_lock_manager.cc | 53 +- src/yb/tserver/ts_local_lock_manager.h | 8 +- src/yb/tserver/tserver.proto | 4 + 29 files changed, 870 insertions(+), 361 deletions(-) delete mode 100644 src/yb/master/object_lock.cc delete mode 100644 src/yb/master/object_lock.h create mode 100644 src/yb/master/object_lock_info_manager.cc create mode 100644 src/yb/master/object_lock_info_manager.h diff --git a/src/yb/integration-tests/object_lock-test.cc b/src/yb/integration-tests/object_lock-test.cc index 3153cbe46b0e..98e44a178d8c 100644 --- a/src/yb/integration-tests/object_lock-test.cc +++ b/src/yb/integration-tests/object_lock-test.cc @@ -97,6 +97,7 @@ constexpr uint64_t 
kSessionId = 1; constexpr uint64_t kSessionId2 = 2; constexpr uint64_t kDatabaseID = 1; constexpr uint64_t kObjectId = 1; +constexpr uint64_t kObjectId2 = 2; constexpr size_t kTimeoutMs = 5000; tserver::AcquireObjectLockRequestPB AcquireRequestFor( @@ -198,6 +199,16 @@ TEST_F(ObjectLockTest, AcquireObjectLocksWaitsOnTServer) { ASSERT_EQ(tserver0->server()->ts_local_lock_manager()->TEST_WaitingLocksSize(), 0); } +TEST_F(ObjectLockTest, AcquireAndReleaseDDLLock) { + auto master_proxy = ASSERT_RESULT(MasterLeaderProxy()); + ASSERT_OK(AcquireLockAt( + &master_proxy, kSessionId2, kDatabaseID, kObjectId, TableLockType::ACCESS_EXCLUSIVE)); + ASSERT_OK(ReleaseLockAt(&master_proxy, kSessionId2, kDatabaseID, kObjectId)); + + // Release non-existent lock. + ASSERT_OK(ReleaseLockAt(&master_proxy, kSessionId2, kDatabaseID, kObjectId2)); +} + TEST_F(ObjectLockTest, AcquireObjectLocksRetriesUponMultipleTServerAddition) { auto* tserver0 = cluster_->mini_tablet_server(0); auto tserver0_proxy = TServerProxyFor(tserver0); @@ -219,12 +230,11 @@ TEST_F(ObjectLockTest, AcquireObjectLocksRetriesUponMultipleTServerAddition) { }, MonoDelta::FromMilliseconds(kTimeoutMs), "wait for blocking on TServer0")); - // Expect to see that the lock acquisition happens even at the new tserver auto num_ts = cluster_->num_tablet_servers(); ASSERT_OK(cluster_->AddTabletServer()); ASSERT_OK(cluster_->WaitForTabletServerCount(num_ts + 1)); - // Add TS-4 + // Add TS-4. 
auto* added_tserver1 = cluster_->mini_tablet_server(num_ts); ASSERT_EQ(added_tserver1->server()->ts_local_lock_manager()->TEST_GrantedLocksSize(), 0); auto added_tserver1_proxy = TServerProxyFor(added_tserver1); @@ -255,13 +265,110 @@ TEST_F(ObjectLockTest, AcquireObjectLocksRetriesUponMultipleTServerAddition) { // Add TS-5 ASSERT_OK(cluster_->AddTabletServer()); ASSERT_OK(cluster_->WaitForTabletServerCount(num_ts + 2)); - auto* added_tserver2 = cluster_->mini_tablet_server(num_ts + 1); - ASSERT_EQ(added_tserver2->server()->ts_local_lock_manager()->TEST_GrantedLocksSize(), 0); - // TS-5 was added after the lock acquisition was complete. Unless we add master persistence - // and bootstrapping the lock manager during TSRegistration, we expect to see no locks on ts-5 - ASSERT_EQ(added_tserver2->server()->ts_local_lock_manager()->TEST_GrantedLocksSize(), 0); + auto* added_tserver2 = cluster_->mini_tablet_server(num_ts + 1); + ASSERT_OK(WaitFor( + [added_tserver2]() { + return added_tserver2->server()->ts_local_lock_manager()->TEST_GrantedLocksSize() > 0; + }, + 1s, "Wait for the added TS to bootstrap")); + // DDL lock acquisition should have bootstrapped during registration and taken the lock on TS-5 + // also + ASSERT_GE(added_tserver2->server()->ts_local_lock_manager()->TEST_GrantedLocksSize(), 1); ASSERT_EQ(added_tserver2->server()->ts_local_lock_manager()->TEST_WaitingLocksSize(), 0); } +TEST_F(ObjectLockTest, BootstrapTServersUponAddition) { + auto master_proxy = ASSERT_RESULT(MasterLeaderProxy()); + ASSERT_OK(AcquireLockAt( + &master_proxy, kSessionId2, kDatabaseID, kObjectId, TableLockType::ACCESS_EXCLUSIVE)); + + auto num_ts = cluster_->num_tablet_servers(); + ASSERT_OK(cluster_->AddTabletServer()); + ASSERT_OK(cluster_->WaitForTabletServerCount(num_ts + 1)); + + auto* added_tserver = cluster_->mini_tablet_server(num_ts); + ASSERT_OK(WaitFor( + [added_tserver]() { + return added_tserver->server()->ts_local_lock_manager()->TEST_GrantedLocksSize() > 0; + }, + 1s, 
"Wait for the added TS to bootstrap")); + + auto expected_locks = + cluster_->mini_tablet_server(0)->server()->ts_local_lock_manager()->TEST_GrantedLocksSize(); + ASSERT_GE(expected_locks, 1); + // Expect to see that the lock acquisition happens even at the new tserver + LOG(INFO) << "Counts after acquiring the DDL lock and adding TServers"; + for (auto ts : cluster_->mini_tablet_servers()) { + LOG(INFO) << ts->ToString() << " TestWaitingLocksSize: " + << ts->server()->ts_local_lock_manager()->TEST_WaitingLocksSize() + << " TestGrantedLocksSize: " + << ts->server()->ts_local_lock_manager()->TEST_GrantedLocksSize(); + ASSERT_EQ(ts->server()->ts_local_lock_manager()->TEST_GrantedLocksSize(), expected_locks); + } + + ASSERT_OK(ReleaseLockAt(&master_proxy, kSessionId2, kDatabaseID, kObjectId)); + + LOG(INFO) << "Counts after releasing the DDL lock"; + expected_locks = 0; + for (auto ts : cluster_->mini_tablet_servers()) { + LOG(INFO) << ts->ToString() << " TestWaitingLocksSize: " + << ts->server()->ts_local_lock_manager()->TEST_WaitingLocksSize() + << " TestGrantedLocksSize: " + << ts->server()->ts_local_lock_manager()->TEST_GrantedLocksSize(); + ASSERT_EQ(ts->server()->ts_local_lock_manager()->TEST_GrantedLocksSize(), expected_locks); + } +} + +class MultiMasterObjectLockTest : public ObjectLockTest { + protected: + int num_masters() override { + return 3; + } +}; + +TEST_F_EX(ObjectLockTest, AcquireAndReleaseDDLLockAcrossMasterFailover, MultiMasterObjectLockTest) { + const auto num_ts = cluster_->num_tablet_servers(); + auto* leader_master1 = ASSERT_RESULT(cluster_->GetLeaderMiniMaster()); + { + LOG(INFO) << "Acquiring lock on object " << kObjectId << " from master " + << leader_master1->ToString(); + auto master_proxy = MasterProxy(leader_master1); + ASSERT_OK(AcquireLockAt( + &master_proxy, kSessionId2, kDatabaseID, kObjectId, TableLockType::ACCESS_EXCLUSIVE)); + } + + for (const auto& tserver : cluster_->mini_tablet_servers()) { + LOG(INFO) << tserver->ToString() 
<< " GrantedLocks " + << tserver->server()->ts_local_lock_manager()->TEST_GrantedLocksSize(); + ASSERT_GE(tserver->server()->ts_local_lock_manager()->TEST_GrantedLocksSize(), 1); + } + + LOG(INFO) << "Stepping down from " << leader_master1->ToString(); + ASSERT_OK(cluster_->StepDownMasterLeader()); + ASSERT_OK(cluster_->WaitForTabletServerCount(num_ts)); + + ASSERT_OK(cluster_->AddTabletServer()); + ASSERT_OK(cluster_->WaitForTabletServerCount(num_ts + 1)); + + auto* added_tserver = cluster_->mini_tablet_server(num_ts); + ASSERT_OK(WaitFor( + [added_tserver]() { + return added_tserver->server()->ts_local_lock_manager()->TEST_GrantedLocksSize() > 0; + }, + 1s, "Wait for the added TS to bootstrap")); + LOG(INFO) << added_tserver->ToString() << " GrantedLocks " + << added_tserver->server()->ts_local_lock_manager()->TEST_GrantedLocksSize(); + ASSERT_GE(added_tserver->server()->ts_local_lock_manager()->TEST_GrantedLocksSize(), 1); + + // Release lock + auto* leader_master2 = ASSERT_RESULT(cluster_->GetLeaderMiniMaster()); + { + LOG(INFO) << "Releasing lock on object " << kObjectId << " at master " + << leader_master2->ToString(); + auto master_proxy = MasterProxy(leader_master2); + ASSERT_OK(ReleaseLockAt(&master_proxy, kSessionId2, kDatabaseID, kObjectId)); + } +} + } // namespace yb diff --git a/src/yb/master/CMakeLists.txt b/src/yb/master/CMakeLists.txt index b42c213ad958..f2c02c8b953c 100644 --- a/src/yb/master/CMakeLists.txt +++ b/src/yb/master/CMakeLists.txt @@ -114,7 +114,7 @@ set(MASTER_SRCS mini_master.cc master_snapshot_coordinator.cc multi_step_monitored_task.cc - object_lock.cc + object_lock_info_manager.cc post_tablet_create_task_base.cc restoration_state.cc restore_sys_catalog_state.cc diff --git a/src/yb/master/catalog_entity_info.h b/src/yb/master/catalog_entity_info.h index 9444d2e7fba4..07ebf95d7699 100644 --- a/src/yb/master/catalog_entity_info.h +++ b/src/yb/master/catalog_entity_info.h @@ -942,6 +942,25 @@ class UDTypeInfo : public 
RefCountedThreadSafe, DISALLOW_COPY_AND_ASSIGN(UDTypeInfo); }; +// This wraps around the proto containing information about what locks have been taken. +// It will be used for LockObject persistence. +struct PersistentObjectLockInfo : public Persistent {}; + +class ObjectLockInfo : public MetadataCowWrapper { + public: + explicit ObjectLockInfo(const std::string& ts_uuid) : ts_uuid_(ts_uuid) {} + ~ObjectLockInfo() = default; + + // Return the user defined type's ID. Does not require synchronization. + virtual const std::string& id() const override { return ts_uuid_; } + + private: + // The ID field is used in the sys_catalog table. + const std::string ts_uuid_; + + DISALLOW_COPY_AND_ASSIGN(ObjectLockInfo); +}; + // This wraps around the proto containing cluster level config information. It will be used for // CowObject managed access. struct PersistentClusterConfigInfo : public Persistent {}; diff --git a/src/yb/master/catalog_entity_info.proto b/src/yb/master/catalog_entity_info.proto index 1143e5b19369..444a951bba32 100644 --- a/src/yb/master/catalog_entity_info.proto +++ b/src/yb/master/catalog_entity_info.proto @@ -21,6 +21,7 @@ import "yb/cdc/xcluster_producer.proto"; import "yb/common/common.proto"; import "yb/common/common_net.proto"; import "yb/common/common_types.proto"; +import "yb/common/transaction.proto"; import "yb/common/wire_protocol.proto"; import "yb/consensus/metadata.proto"; import "yb/master/master_types.proto"; @@ -392,6 +393,28 @@ message SysClusterConfigEntryPB { optional string universe_uuid = 8; } +message SysObjectLockEntryPB { + message LockTypesPB { repeated TableLockType lock_type = 1; } + message ObjectLocksMapPB { + // object_id -> Locks taken + map objects = 1; + } + message DBObjectsMapPB { + // db_id -> map of objects/locks-taken + map dbs = 1; + } + message SessionDBMapPB { + // session_id -> DB ... + map sessions = 1; + } + + // host_uid is part of the key. Thus not stored + // explicitly in this proto again. 
+ + // incarnation id -> Session ... + map incarnations = 1; +} + message SysXClusterConfigEntryPB { optional uint32 version = 1; optional xcluster.ProducerRegistryPB xcluster_producer_registry = 2; diff --git a/src/yb/master/catalog_entity_types.h b/src/yb/master/catalog_entity_types.h index 1ff45a3850ce..08f63c4d31fa 100644 --- a/src/yb/master/catalog_entity_types.h +++ b/src/yb/master/catalog_entity_types.h @@ -50,7 +50,8 @@ class SysRowEntry; ((UNIVERSE_REPLICATION_BOOTSTRAP, SysUniverseReplicationBootstrapEntryPB)) \ ((XCLUSTER_OUTBOUND_REPLICATION_GROUP, SysXClusterOutboundReplicationGroupEntryPB)) \ ((CLONE_STATE, SysCloneStatePB)) \ - ((TSERVER_REGISTRATION, SysTServerEntryPB)) + ((TSERVER_REGISTRATION, SysTServerEntryPB)) \ + ((OBJECT_LOCK_ENTRY, SysObjectLockEntryPB)) // We should have an entry for each SysRowEntryType in the map except for UNKNOWN. static_assert( diff --git a/src/yb/master/catalog_loaders.cc b/src/yb/master/catalog_loaders.cc index 085a06bf0955..be6cf378ec5d 100644 --- a/src/yb/master/catalog_loaders.cc +++ b/src/yb/master/catalog_loaders.cc @@ -558,6 +558,20 @@ Status UDTypeLoader::Visit(const UDTypeId& udtype_id, const SysUDTypeEntryPB& me return Status::OK(); } +// key corresponds to the host_uuid. 
+Status ObjectLockLoader::Visit(const std::string& host_uuid, const SysObjectLockEntryPB& pb) { + std::shared_ptr info = std::make_shared(host_uuid); + { + auto l = info->LockForWrite(); + l.mutable_data()->pb.CopyFrom(pb); + l.Commit(); + catalog_manager_->object_lock_info_manager_->InsertOrAssign(host_uuid, info); + } + + LOG(INFO) << "Loaded metadata for type " << info->ToString(); + VLOG(1) << "Metadata for type " << info->ToString() << ": " << pb.ShortDebugString(); + return Status::OK(); +} //////////////////////////////////////////////////////////// // Config Loader //////////////////////////////////////////////////////////// diff --git a/src/yb/master/catalog_loaders.h b/src/yb/master/catalog_loaders.h index 71a2b40b060f..513741451fa4 100644 --- a/src/yb/master/catalog_loaders.h +++ b/src/yb/master/catalog_loaders.h @@ -120,6 +120,7 @@ DECLARE_LOADER_CLASS(Namespace, NamespaceId, SysNamespaceEntryPB, catalo DECLARE_LOADER_CLASS(UDType, UDTypeId, SysUDTypeEntryPB, catalog_manager_->mutex_); DECLARE_LOADER_CLASS(ClusterConfig, std::string, SysClusterConfigEntryPB, catalog_manager_->mutex_); DECLARE_LOADER_CLASS(RedisConfig, std::string, SysRedisConfigEntryPB, catalog_manager_->mutex_); +DECLARE_LOADER_CLASS(ObjectLock, std::string, SysObjectLockEntryPB, catalog_manager_->mutex_); DECLARE_LOADER_CLASS(Role, RoleName, SysRoleEntryPB, catalog_manager_->permissions_manager()->mutex()); DECLARE_LOADER_CLASS(SysConfig, std::string, SysConfigEntryPB, diff --git a/src/yb/master/catalog_manager.cc b/src/yb/master/catalog_manager.cc index 9408d8eb8784..264723e8d5e4 100644 --- a/src/yb/master/catalog_manager.cc +++ b/src/yb/master/catalog_manager.cc @@ -120,6 +120,7 @@ #include "yb/master/async_rpc_tasks.h" #include "yb/master/backfill_index.h" #include "yb/master/catalog_entity_info.h" +#include "yb/master/catalog_entity_info.pb.h" #include "yb/master/catalog_entity_parser.h" #include "yb/master/catalog_loaders.h" #include "yb/master/catalog_manager-internal.h" @@ 
-140,7 +141,7 @@ #include "yb/master/master_replication.pb.h" #include "yb/master/master_snapshot_coordinator.h" #include "yb/master/master_util.h" -#include "yb/master/object_lock.h" +#include "yb/master/object_lock_info_manager.h" #include "yb/master/permissions_manager.h" #include "yb/master/post_tablet_create_task_base.h" #include "yb/master/scoped_leader_shared_lock-internal.h" @@ -932,6 +933,7 @@ CatalogManager::CatalogManager(Master* master) leader_lock_(RWMutex::Priority::PREFER_WRITING), load_balance_policy_(std::make_unique(this)), tablegroup_manager_(std::make_unique()), + object_lock_info_manager_(std::make_unique(master_, this)), permissions_manager_(std::make_unique(this)), tasks_tracker_(new TasksTracker(IsUserInitiated::kFalse)), jobs_tracker_(new TasksTracker(IsUserInitiated::kTrue)), @@ -1412,6 +1414,9 @@ Status CatalogManager::RunLoaders(SysCatalogLoadingState* state) { // Clear redis config mapping. redis_config_map_.clear(); + // Clear Object lock mapping. + object_lock_info_manager_->Clear(); + // Clear ysql catalog config. 
ysql_catalog_config_.reset(); @@ -1458,6 +1463,7 @@ Status CatalogManager::RunLoaders(SysCatalogLoadingState* state) { RETURN_NOT_OK(Load("user-defined types", state)); RETURN_NOT_OK(Load("cluster configuration", state)); RETURN_NOT_OK(Load("Redis config", state)); + RETURN_NOT_OK(Load("Object locks", state)); if (!transaction_tables_config_) { RETURN_NOT_OK(InitializeTransactionTablesConfig(state->epoch.leader_term)); @@ -5346,6 +5352,7 @@ std::string CatalogManager::GenerateIdUnlocked( case SysRowEntryType::UNIVERSE_REPLICATION_BOOTSTRAP: FALLTHROUGH_INTENDED; case SysRowEntryType::XCLUSTER_OUTBOUND_REPLICATION_GROUP: FALLTHROUGH_INTENDED; case SysRowEntryType::TSERVER_REGISTRATION: FALLTHROUGH_INTENDED; + case SysRowEntryType::OBJECT_LOCK_ENTRY: FALLTHROUGH_INTENDED; case SysRowEntryType::UNKNOWN: LOG(DFATAL) << "Invalid id type: " << *entity_type; return id; @@ -6161,8 +6168,8 @@ Status CatalogManager::DeleteIndexInfoFromTable( } void CatalogManager::AcquireObjectLocks( - const tserver::AcquireObjectLockRequestPB* req, tserver::AcquireObjectLockResponsePB* resp, - rpc::RpcContext rpc) { + LeaderEpoch epoch, const tserver::AcquireObjectLockRequestPB* req, + tserver::AcquireObjectLockResponsePB* resp, rpc::RpcContext rpc) { VLOG(0) << __PRETTY_FUNCTION__; if (!FLAGS_TEST_enable_object_locking_for_table_locks) { rpc.RespondRpcFailure( @@ -6170,12 +6177,12 @@ void CatalogManager::AcquireObjectLocks( STATUS(NotSupported, "Flag enable_object_locking_for_table_locks disabled")); return; } - LockObject(master_, this, req, resp, std::move(rpc)); + object_lock_info_manager_->LockObject(epoch, req, resp, std::move(rpc)); } void CatalogManager::ReleaseObjectLocks( - const tserver::ReleaseObjectLockRequestPB* req, tserver::ReleaseObjectLockResponsePB* resp, - rpc::RpcContext rpc) { + LeaderEpoch epoch, const tserver::ReleaseObjectLockRequestPB* req, + tserver::ReleaseObjectLockResponsePB* resp, rpc::RpcContext rpc) { VLOG(0) << __PRETTY_FUNCTION__; if 
(!FLAGS_TEST_enable_object_locking_for_table_locks) { rpc.RespondRpcFailure( @@ -6183,7 +6190,11 @@ void CatalogManager::ReleaseObjectLocks( STATUS(NotSupported, "Flag enable_object_locking_for_table_locks disabled")); return; } - UnlockObject(master_, this, req, resp, std::move(rpc)); + object_lock_info_manager_->UnlockObject(epoch, req, resp, std::move(rpc)); +} + +void CatalogManager::ExportObjectLockInfo(tserver::DdlLockEntriesPB* resp) { + object_lock_info_manager_->ExportObjectLockInfo(resp); } Status CatalogManager::GetIndexBackfillProgress(const GetIndexBackfillProgressRequestPB* req, diff --git a/src/yb/master/catalog_manager.h b/src/yb/master/catalog_manager.h index bc5ff814f6d8..4444685ba2a3 100644 --- a/src/yb/master/catalog_manager.h +++ b/src/yb/master/catalog_manager.h @@ -70,6 +70,7 @@ #include "yb/master/master_encryption.fwd.h" #include "yb/master/master_heartbeat.pb.h" #include "yb/master/master_types.h" +#include "yb/master/object_lock_info_manager.h" #include "yb/master/scoped_leader_shared_lock.h" #include "yb/master/snapshot_coordinator_context.h" #include "yb/master/sys_catalog.h" @@ -406,11 +407,12 @@ class CatalogManager : public tserver::TabletPeerLookupIf, const LeaderEpoch& epoch); void AcquireObjectLocks( - const tserver::AcquireObjectLockRequestPB* req, tserver::AcquireObjectLockResponsePB* resp, - rpc::RpcContext rpc); + LeaderEpoch epoch, const tserver::AcquireObjectLockRequestPB* req, + tserver::AcquireObjectLockResponsePB* resp, rpc::RpcContext rpc); void ReleaseObjectLocks( - const tserver::ReleaseObjectLockRequestPB* req, tserver::ReleaseObjectLockResponsePB* resp, - rpc::RpcContext rpc); + LeaderEpoch epoch, const tserver::ReleaseObjectLockRequestPB* req, + tserver::ReleaseObjectLockResponsePB* resp, rpc::RpcContext rpc); + void ExportObjectLockInfo(tserver::DdlLockEntriesPB* resp); // Gets the progress of ongoing index backfills. 
Status GetIndexBackfillProgress(const GetIndexBackfillProgressRequestPB* req, @@ -1684,6 +1686,7 @@ class CatalogManager : public tserver::TabletPeerLookupIf, friend class BackendsCatalogVersionJob; friend class AddTableToXClusterTargetTask; friend class VerifyDdlTransactionTask; + friend class ObjectLockLoader; FRIEND_TEST(yb::MasterPartitionedTest, VerifyOldLeaderStepsDown); @@ -2393,6 +2396,8 @@ class CatalogManager : public tserver::TabletPeerLookupIf, std::unique_ptr tablegroup_manager_ GUARDED_BY(mutex_); + std::unique_ptr object_lock_info_manager_; + boost::optional> initdb_future_; boost::optional initial_snapshot_writer_; diff --git a/src/yb/master/catalog_manager_ext.cc b/src/yb/master/catalog_manager_ext.cc index f4cf3100fa91..80fc5c91f28d 100644 --- a/src/yb/master/catalog_manager_ext.cc +++ b/src/yb/master/catalog_manager_ext.cc @@ -747,6 +747,7 @@ Status CatalogManager::ImportSnapshotPreprocess( case SysRowEntryType::XCLUSTER_OUTBOUND_REPLICATION_GROUP: FALLTHROUGH_INTENDED; case SysRowEntryType::CLONE_STATE: FALLTHROUGH_INTENDED; case SysRowEntryType::TSERVER_REGISTRATION: FALLTHROUGH_INTENDED; + case SysRowEntryType::OBJECT_LOCK_ENTRY: FALLTHROUGH_INTENDED; case SysRowEntryType::UNKNOWN: FATAL_INVALID_ENUM_VALUE(SysRowEntryType, entry.type()); } diff --git a/src/yb/master/master_heartbeat.proto b/src/yb/master/master_heartbeat.proto index 094e54db4632..197863e66663 100644 --- a/src/yb/master/master_heartbeat.proto +++ b/src/yb/master/master_heartbeat.proto @@ -25,6 +25,7 @@ import "yb/consensus/consensus_types.proto"; import "yb/encryption/encryption.proto"; import "yb/master/master_types.proto"; import "yb/tablet/tablet_types.proto"; +import "yb/tserver/tserver.proto"; import "yb/rpc/service.proto"; // Common information sent with every request from the tablet server @@ -320,6 +321,10 @@ message TSHeartbeatResponsePB { optional uint32 xcluster_config_version = 24; optional string universe_uuid = 25; + + // TODO: If this ends up being too big, 
consider adding a way to break this up + // into multiple messages. + optional tserver.DdlLockEntriesPB ddl_lock_entries = 26; } service MasterHeartbeat { diff --git a/src/yb/master/master_heartbeat_service.cc b/src/yb/master/master_heartbeat_service.cc index 01b1a9bc4919..396fe288a9e2 100644 --- a/src/yb/master/master_heartbeat_service.cc +++ b/src/yb/master/master_heartbeat_service.cc @@ -1460,6 +1460,13 @@ Result MasterHeartbeatServiceImpl::RegisterTServerOrRespond( auto desc_result = server_->ts_manager()->RegisterFromHeartbeat( req, server_->MakeCloudInfoPB(), &server_->proxy_cache()); if (desc_result.ok()) { + // Populate the response to bootstrap object locks. + // TODO: This would also need to be done whenever a tablet server with an + // expired lease gets a new lease. YSQL Leases are yet to be implemented. + // When that happens, we should re-bootstrap the TServer and bump up + // its incarnation id (similar to how instance_seqno behaves across restarts). + LOG(INFO) << "Registering " << req.common().ts_instance().ShortDebugString(); + server_->catalog_manager_impl()->ExportObjectLockInfo(resp->mutable_ddl_lock_entries()); return std::move(*desc_result); } auto status = std::move(desc_result.status()); diff --git a/src/yb/master/master_tablet_service.cc b/src/yb/master/master_tablet_service.cc index ccef9732b435..c6ca984d901a 100644 --- a/src/yb/master/master_tablet_service.cc +++ b/src/yb/master/master_tablet_service.cc @@ -85,7 +85,7 @@ void MasterTabletServiceImpl::AcquireObjectLocks( return; } - master_->catalog_manager_impl()->AcquireObjectLocks(req, resp, std::move(context)); + master_->catalog_manager_impl()->AcquireObjectLocks(l.epoch(), req, resp, std::move(context)); } void MasterTabletServiceImpl::ReleaseObjectLocks( @@ -106,7 +106,7 @@ void MasterTabletServiceImpl::ReleaseObjectLocks( return; } - master_->catalog_manager_impl()->ReleaseObjectLocks(req, resp, std::move(context)); + master_->catalog_manager_impl()->ReleaseObjectLocks(l.epoch(), 
req, resp, std::move(context)); } void MasterTabletServiceImpl::Write(const tserver::WriteRequestPB* req, diff --git a/src/yb/master/master_types.proto b/src/yb/master/master_types.proto index 15aff00dc0e1..704d58faa671 100644 --- a/src/yb/master/master_types.proto +++ b/src/yb/master/master_types.proto @@ -49,6 +49,7 @@ enum SysRowEntryType { XCLUSTER_OUTBOUND_REPLICATION_GROUP = 18; CLONE_STATE = 19; TSERVER_REGISTRATION = 20; + OBJECT_LOCK_ENTRY = 21; // Each value must have a corresponding entry in CATALOG_ENTITY_TYPE_MAP of // catalog_entity_types.h diff --git a/src/yb/master/mini_master.cc b/src/yb/master/mini_master.cc index 0d19bacd4f51..82829b255b3e 100644 --- a/src/yb/master/mini_master.cc +++ b/src/yb/master/mini_master.cc @@ -92,6 +92,10 @@ Status MiniMaster::StartDistributedMaster(const vector& peer_ports) { return StartDistributedMasterOnPorts(rpc_port_, web_port_, peer_ports); } +std::string MiniMaster::ToString() const { + return Format("m-$0", index_); +} + void MiniMaster::Shutdown() { TEST_SetThreadPrefixScoped prefix_se(Format("m-$0", index_)); if (tunnel_) { diff --git a/src/yb/master/mini_master.h b/src/yb/master/mini_master.h index 53a352b4ea42..ed37bb5ff8fa 100644 --- a/src/yb/master/mini_master.h +++ b/src/yb/master/mini_master.h @@ -117,6 +117,8 @@ class MiniMaster { FsManager& fs_manager() const; + std::string ToString() const; + private: Status StartDistributedMasterOnPorts(uint16_t rpc_port, uint16_t web_port, const std::vector& peer_ports); diff --git a/src/yb/master/object_lock.cc b/src/yb/master/object_lock.cc deleted file mode 100644 index f592d764a0e7..000000000000 --- a/src/yb/master/object_lock.cc +++ /dev/null @@ -1,294 +0,0 @@ -// Copyright (c) YugaByte, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except -// in compliance with the License. 
You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software distributed under the License -// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express -// or implied. See the License for the specific language governing permissions and limitations -// under the License. -// - -#include "yb/master/object_lock.h" - -#include -#include -#include -#include - -#include "yb/common/wire_protocol.h" - -#include "yb/gutil/strings/substitute.h" - -#include "yb/master/async_rpc_tasks.h" -#include "yb/master/catalog_manager.h" -#include "yb/master/master.h" -#include "yb/master/master_error.h" -#include "yb/master/master_ddl.pb.h" -#include "yb/master/sys_catalog.h" - -#include "yb/rpc/rpc_context.h" - -#include "yb/tserver/tserver_service.proxy.h" - -#include "yb/util/flags.h" -#include "yb/util/format.h" -#include "yb/util/logging.h" -#include "yb/util/result.h" -#include "yb/util/status_format.h" -#include "yb/util/trace.h" - -namespace yb { -namespace master { - -using namespace std::literals; -using server::MonitoredTaskState; -using strings::Substitute; -using tserver::AcquireObjectLockRequestPB; -using tserver::AcquireObjectLockResponsePB; -using tserver::ReleaseObjectLockRequestPB; -using tserver::ReleaseObjectLockResponsePB; -using tserver::TabletServerErrorPB; - -template -class UpdateAllTServers : public std::enable_shared_from_this> { - public: - UpdateAllTServers( - Master* master, CatalogManager* catalog_manager, TSDescriptorVector&& ts_descs, - const Req& req, rpc::RpcContext rpc); - - void Launch(); - const Req& request() const { - return req_; - } - - private: - void LaunchFrom(size_t from_idx); - void Done(size_t i, const Status& s); - void CheckForDone(); - // Relaunches if there have been new TServers who joined. Returns true if relaunched. 
- bool RelaunchIfNecessary(); - - Master* master_; - CatalogManager* catalog_manager_; - TSDescriptorVector ts_descriptors_; - std::atomic ts_pending_; - std::vector statuses_; - const Req req_; - rpc::RpcContext context_; -}; - -template -class UpdateTServer : public RetrySpecificTSRpcTask { - public: - UpdateTServer( - Master* master, ThreadPool* callback_pool, const TabletServerId& ts_uuid, - std::shared_ptr> shared_all_tservers, - StdStatusCallback callback); - - server::MonitoredTaskType type() const override { return server::MonitoredTaskType::kObjectLock; } - - std::string type_name() const override { return "Object Lock"; } - - std::string description() const override; - - protected: - void Finished(const Status& status) override; - - const Req& request() const { - return shared_all_tservers_->request(); - } - - private: - TabletId tablet_id() const override { return TabletId(); } - - void HandleResponse(int attempt) override; - bool SendRequest(int attempt) override; - - StdStatusCallback callback_; - Resp resp_; - - std::shared_ptr> shared_all_tservers_; -}; - -void LockObject( - Master* master, CatalogManager* catalog_manager, const AcquireObjectLockRequestPB* req, - AcquireObjectLockResponsePB* resp, rpc::RpcContext rpc) { - VLOG(0) << __PRETTY_FUNCTION__; - // TODO: Fix this. GetAllDescriptors may need to change to handle tserver membership reliably. - auto ts_descriptors = master->ts_manager()->GetAllDescriptors(); - auto lock_objects = - std::make_shared>( - master, catalog_manager, std::move(ts_descriptors), *req, std::move(rpc)); - lock_objects->Launch(); -} - -void UnlockObject( - Master* master, CatalogManager* catalog_manager, const ReleaseObjectLockRequestPB* req, - ReleaseObjectLockResponsePB* resp, rpc::RpcContext rpc) { - VLOG(0) << __PRETTY_FUNCTION__; - // TODO: Fix this. GetAllDescriptors may need to change to handle tserver membership reliably. 
- auto ts_descriptors = master->ts_manager()->GetAllDescriptors(); - if (ts_descriptors.empty()) { - // TODO(Amit): Handle the case where the master receives a DDL lock/unlock request - // before all the TServers have registered - // (they may have registered with the older master-leader, thus serving DML reqs). - // Get rid of this check when we have done this. - rpc.RespondRpcFailure( - rpc::ErrorStatusPB::ERROR_APPLICATION, - STATUS(IllegalState, "No TServers registered with the master yet.")); - return; - } - auto unlock_objects = - std::make_shared>( - master, catalog_manager, std::move(ts_descriptors), *req, std::move(rpc)); - unlock_objects->Launch(); -} - -template -UpdateAllTServers::UpdateAllTServers( - Master* master, CatalogManager* catalog_manager, TSDescriptorVector&& ts_descriptors, - const Req& req, rpc::RpcContext rpc) - : master_(master), - catalog_manager_(catalog_manager), - ts_descriptors_(std::move(ts_descriptors)), - statuses_(ts_descriptors_.size(), STATUS(Uninitialized, "")), - req_(req), - context_(std::move(rpc)) { - VLOG(0) << __PRETTY_FUNCTION__; -} - -template -void UpdateAllTServers::Launch() { - LaunchFrom(0); -} - -template -void UpdateAllTServers::Done(size_t i, const Status& s) { - statuses_[i] = s; - // TODO: There is a potential here for early return if s is not OK. 
- if (--ts_pending_ == 0) { - CheckForDone(); - } -} - -template -void UpdateAllTServers::LaunchFrom(size_t start_idx) { - size_t num_descriptors = ts_descriptors_.size(); - ts_pending_ = num_descriptors - start_idx; - LOG(INFO) << __func__ << " launching for " << ts_pending_ << " tservers."; - for (size_t i = start_idx; i < num_descriptors; ++i) { - auto ts_uuid = ts_descriptors_[i]->permanent_uuid(); - LOG(INFO) << "Launching for " << ts_uuid; - auto callback = std::bind(&UpdateAllTServers::Done, this, i, std::placeholders::_1); - auto task = std::make_shared>( - master_, catalog_manager_->AsyncTaskPool(), ts_uuid, this->shared_from_this(), callback); - WARN_NOT_OK( - catalog_manager_->ScheduleTask(task), - yb::Format( - "Failed to schedule request to UpdateTServer to $0 for $1", ts_uuid, - request().DebugString())); - } -} - -template -void UpdateAllTServers::CheckForDone() { - for (const auto& status : statuses_) { - if (!status.ok()) { - LOG(INFO) << "Error in acquiring object lock: " << status; - // TBD: Release the taken locks. - // What is the best way to handle failures? Say one TServer errors out, - // Should we release the locks taken by the other TServers? What if one of the - // TServers had given a lock to the same session earlier -- would a release get - // rid of that lock as well? - // How to handle this during Release locks? - // - // What can cause failures here? - // - If a TServer goes down, how can we handle it? 
- context_.RespondRpcFailure(rpc::ErrorStatusPB::ERROR_APPLICATION, status); - return; - } - } - if (!RelaunchIfNecessary()) { - context_.RespondSuccess(); - } -} - -template -bool UpdateAllTServers::RelaunchIfNecessary() { - auto old_size = ts_descriptors_.size(); - auto ts_descriptors = master_->ts_manager()->GetAllDescriptors(); - for (auto ts_descriptor : ts_descriptors) { - if (std::find(ts_descriptors_.begin(), ts_descriptors_.end(), ts_descriptor) == - ts_descriptors_.end()) { - ts_descriptors_.push_back(ts_descriptor); - statuses_.push_back(Status::OK()); - } - } - if (ts_descriptors.size() == old_size) { - return false; - } - - LOG(INFO) << "New TServers were added. Relaunching."; - LaunchFrom(old_size); - return true; -} - -template -UpdateTServer::UpdateTServer( - Master* master, ThreadPool* callback_pool, const TabletServerId& ts_uuid, - std::shared_ptr> shared_all_tservers, StdStatusCallback callback) - : RetrySpecificTSRpcTask(master, callback_pool, ts_uuid, /* async_task_throttler */ nullptr), - callback_(std::move(callback)), - shared_all_tservers_(shared_all_tservers) {} - -template <> -bool UpdateTServer::SendRequest( - int attempt) { - VLOG(0) << ToString() << __func__ << " attempt " << attempt; - ts_proxy_->AcquireObjectLocksAsync(request(), &resp_, &rpc_, BindRpcCallback()); - return true; -} - -template <> -bool UpdateTServer::SendRequest( - int attempt) { - VLOG(0) << ToString() << __func__ << " attempt " << attempt; - ts_proxy_->ReleaseObjectLocksAsync(request(), &resp_, &rpc_, BindRpcCallback()); - return true; -} - -template <> -std::string UpdateTServer::description() - const { - return Format("Acquire object lock for $0 at $1", request().DebugString(), permanent_uuid_); -} - -template <> -std::string UpdateTServer::description() - const { - return Format("Release object lock for $0 at $1", request().DebugString(), permanent_uuid_); -} - -template -void UpdateTServer::HandleResponse(int attempt) { - VLOG(0) << ToString() << __func__ << " 
response is " << yb::ToString(resp_); - Status status; - if (resp_.has_error()) { - status = StatusFromPB(resp_.error().status()); - TransitionToFailedState(server::MonitoredTaskState::kRunning, status); - } else { - TransitionToCompleteState(); - } -} - -template -void UpdateTServer::Finished(const Status& status) { - VLOG(0) << ToString() << __func__ << " (" << status << ")"; - callback_(status); -} - -} // namespace master -} // namespace yb diff --git a/src/yb/master/object_lock.h b/src/yb/master/object_lock.h deleted file mode 100644 index 1db4b0947175..000000000000 --- a/src/yb/master/object_lock.h +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright (c) YugaByte, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except -// in compliance with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software distributed under the License -// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express -// or implied. See the License for the specific language governing permissions and limitations -// under the License. 
-// - -#pragma once - -#include "yb/master/master_fwd.h" - -namespace yb::rpc { -class RpcContext; -} -namespace yb::tserver { -class AcquireObjectLockRequestPB; -class AcquireObjectLockResponsePB; -class ReleaseObjectLockRequestPB; -class ReleaseObjectLockResponsePB; -} // namespace yb::tserver - -namespace yb::master { - -void LockObject( - Master* master, CatalogManager* catalog_manager, const tserver::AcquireObjectLockRequestPB* req, - tserver::AcquireObjectLockResponsePB* resp, rpc::RpcContext rpc); - -void UnlockObject( - Master* master, CatalogManager* catalog_manager, const tserver::ReleaseObjectLockRequestPB* req, - tserver::ReleaseObjectLockResponsePB* resp, rpc::RpcContext rpc); - -} // namespace yb::master diff --git a/src/yb/master/object_lock_info_manager.cc b/src/yb/master/object_lock_info_manager.cc new file mode 100644 index 000000000000..c434f632b4fa --- /dev/null +++ b/src/yb/master/object_lock_info_manager.cc @@ -0,0 +1,506 @@ +// Copyright (c) YugaByte, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software distributed under the License +// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express +// or implied. See the License for the specific language governing permissions and limitations +// under the License. 
+// + +#include "yb/master/object_lock_info_manager.h" + +#include +#include +#include +#include + +#include "yb/common/wire_protocol.h" + +#include "yb/gutil/strings/substitute.h" + +#include "yb/master/async_rpc_tasks.h" +#include "yb/master/catalog_manager.h" +#include "yb/master/master.h" +#include "yb/master/master_error.h" +#include "yb/master/master_ddl.pb.h" +#include "yb/master/sys_catalog.h" + +#include "yb/rpc/rpc_context.h" + +#include "yb/tserver/tserver.pb.h" +#include "yb/tserver/tserver_service.proxy.h" + +#include "yb/util/flags.h" +#include "yb/util/format.h" +#include "yb/util/logging.h" +#include "yb/util/result.h" +#include "yb/util/status_format.h" +#include "yb/util/trace.h" + +namespace yb { +namespace master { + +using namespace std::literals; +using server::MonitoredTaskState; +using strings::Substitute; +using tserver::AcquireObjectLockRequestPB; +using tserver::AcquireObjectLockResponsePB; +using tserver::ReleaseObjectLockRequestPB; +using tserver::ReleaseObjectLockResponsePB; +using tserver::TabletServerErrorPB; + +class ObjectLockInfoManager::Impl { + public: + Impl(Master* master, CatalogManager* catalog_manager) + : master_(master), catalog_manager_(catalog_manager) {} + + void LockObject( + LeaderEpoch epoch, const tserver::AcquireObjectLockRequestPB* req, + tserver::AcquireObjectLockResponsePB* resp, rpc::RpcContext rpc); + + void UnlockObject( + LeaderEpoch epoch, const tserver::ReleaseObjectLockRequestPB* req, + tserver::ReleaseObjectLockResponsePB* resp, rpc::RpcContext rpc); + + Status RequestDone(LeaderEpoch epoch, const tserver::AcquireObjectLockRequestPB& req) + EXCLUDES(mutex_); + Status RequestDone(LeaderEpoch epoch, const tserver::ReleaseObjectLockRequestPB& req) + EXCLUDES(mutex_); + + void ExportObjectLockInfo(tserver::DdlLockEntriesPB* resp) EXCLUDES(mutex_); + + bool InsertOrAssign(const std::string& tserver_uuid, std::shared_ptr info) + EXCLUDES(mutex_); + void Clear() EXCLUDES(mutex_); + + std::shared_ptr 
CreateOrGetObjectLockInfo(const std::string& key) + EXCLUDES(mutex_); + + private: + Master* master_; + CatalogManager* catalog_manager_; + std::atomic next_request_id_{0}; + + using MutexType = std::mutex; + using LockGuard = std::lock_guard; + mutable MutexType mutex_; + std::unordered_map> object_lock_infos_map_ + GUARDED_BY(mutex_); +}; + +template +class UpdateAllTServers : public std::enable_shared_from_this> { + public: + UpdateAllTServers( + LeaderEpoch epoch, Master* master, CatalogManager* catalog_manager, + ObjectLockInfoManager::Impl* object_lock_info_manager, TSDescriptorVector&& ts_descs, + const Req& req, rpc::RpcContext rpc); + + void Launch(); + const Req& request() const { + return req_; + } + + private: + void LaunchFrom(size_t from_idx); + void Done(size_t i, const Status& s); + void CheckForDone(); + // Relaunches if there have been new TServers who joined. Returns true if relaunched. + bool RelaunchIfNecessary(); + + LeaderEpoch epoch_; + Master* master_; + CatalogManager* catalog_manager_; + ObjectLockInfoManager::Impl* object_lock_info_manager_; + TSDescriptorVector ts_descriptors_; + std::atomic ts_pending_; + std::vector statuses_; + const Req req_; + rpc::RpcContext context_; +}; + +template +class UpdateTServer : public RetrySpecificTSRpcTask { + public: + UpdateTServer( + Master* master, ThreadPool* callback_pool, const TabletServerId& ts_uuid, + std::shared_ptr> shared_all_tservers, + StdStatusCallback callback); + + server::MonitoredTaskType type() const override { return server::MonitoredTaskType::kObjectLock; } + + std::string type_name() const override { return "Object Lock"; } + + std::string description() const override; + + protected: + void Finished(const Status& status) override; + + const Req& request() const { + return shared_all_tservers_->request(); + } + + private: + TabletId tablet_id() const override { return TabletId(); } + + void HandleResponse(int attempt) override; + bool SendRequest(int attempt) override; + + 
StdStatusCallback callback_; + Resp resp_; + + std::shared_ptr> shared_all_tservers_; +}; + +namespace { + +// TODO: Fetch and use the appropriate incarnation Id. +// Incarnation id will be used to identify whether the locks were requested +// by the currently registered tserver. Or is an old lock that should be released +// when the TServer loses its lease. +constexpr int kIncarnationId = 0; + +} // namespace + +ObjectLockInfoManager::ObjectLockInfoManager(Master* master, CatalogManager* catalog_manager) + : impl_(std::make_unique(master, catalog_manager)) {} + +ObjectLockInfoManager::~ObjectLockInfoManager() = default; + +void ObjectLockInfoManager::LockObject( + LeaderEpoch epoch, const tserver::AcquireObjectLockRequestPB* req, + tserver::AcquireObjectLockResponsePB* resp, rpc::RpcContext rpc) { + impl_->LockObject(epoch, req, resp, std::move(rpc)); +} +void ObjectLockInfoManager::UnlockObject( + LeaderEpoch epoch, const tserver::ReleaseObjectLockRequestPB* req, + tserver::ReleaseObjectLockResponsePB* resp, rpc::RpcContext rpc) { + impl_->UnlockObject(epoch, req, resp, std::move(rpc)); +} +void ObjectLockInfoManager::ExportObjectLockInfo(tserver::DdlLockEntriesPB* resp) { + impl_->ExportObjectLockInfo(resp); +} +bool ObjectLockInfoManager::InsertOrAssign( + const std::string& tserver_uuid, std::shared_ptr info) { + return impl_->InsertOrAssign(tserver_uuid, info); +} +void ObjectLockInfoManager::Clear() { impl_->Clear(); } + +std::shared_ptr ObjectLockInfoManager::Impl::CreateOrGetObjectLockInfo( + const std::string& key) { + LockGuard lock(mutex_); + if (object_lock_infos_map_.contains(key)) { + return object_lock_infos_map_.at(key); + } else { + std::shared_ptr object_lock_info; + object_lock_info = std::make_shared(key); + object_lock_infos_map_[key] = object_lock_info; + return object_lock_info; + } +} + +Status ObjectLockInfoManager::Impl::RequestDone( + LeaderEpoch epoch, const tserver::AcquireObjectLockRequestPB& req) { + VLOG(3) << __PRETTY_FUNCTION__; + 
auto key = req.session_host_uuid(); + std::shared_ptr object_lock_info = CreateOrGetObjectLockInfo(key); + + auto lock = object_lock_info->LockForWrite(); + // TODO(Amit) Fetch and use the appropriate incarnation Id. + auto& sessions_map = (*lock.mutable_data()->pb.mutable_incarnations())[kIncarnationId]; + auto& db_map = (*sessions_map.mutable_sessions())[req.session_id()]; + for (const auto& object_lock : req.object_locks()) { + auto& object_map = (*db_map.mutable_dbs())[object_lock.database_oid()]; + auto& types = (*object_map.mutable_objects())[object_lock.object_oid()]; + types.add_lock_type(object_lock.lock_type()); + } + + RETURN_NOT_OK(catalog_manager_->sys_catalog()->Upsert(epoch, object_lock_info)); + lock.Commit(); + return Status::OK(); +} + +Status ObjectLockInfoManager::Impl::RequestDone( + LeaderEpoch epoch, const tserver::ReleaseObjectLockRequestPB& req) { + VLOG(3) << __PRETTY_FUNCTION__; + auto key = req.session_host_uuid(); + std::shared_ptr object_lock_info; + { + LockGuard lock(mutex_); + if (!object_lock_infos_map_.contains(key)) { + VLOG(1) << "Cannot release locks related to untracked host/session. Req: " + << req.DebugString(); + return Status::OK(); + } + object_lock_info = object_lock_infos_map_[key]; + } + auto lock = object_lock_info->LockForWrite(); + // TODO(Amit) Fetch and use the appropriate incarnation Id. 
+ auto& sessions_map = (*lock.mutable_data()->pb.mutable_incarnations())[kIncarnationId]; + if (req.release_all_locks()) { + sessions_map.mutable_sessions()->erase(req.session_id()); + } else { + auto& db_map = (*sessions_map.mutable_sessions())[req.session_id()]; + for (const auto& object_lock : req.object_locks()) { + auto& object_map = (*db_map.mutable_dbs())[object_lock.database_oid()]; + object_map.mutable_objects()->erase(object_lock.object_oid()); + } + } + + RETURN_NOT_OK(catalog_manager_->sys_catalog()->Upsert(epoch, object_lock_info)); + lock.Commit(); + return Status::OK(); +} + +namespace { + +void ExportObjectLocksForSession( + const master::SysObjectLockEntryPB_DBObjectsMapPB& dbs_map, + tserver::AcquireObjectLockRequestPB* req) { + for (const auto& [db_id, objects_map] : dbs_map.dbs()) { + for (const auto& [object_id, lock_types] : objects_map.objects()) { + for (const auto& type : lock_types.lock_type()) { + auto* lock = req->add_object_locks(); + lock->set_database_oid(db_id); + lock->set_object_oid(object_id); + lock->set_lock_type(TableLockType(type)); + } + } + } +} + +} // namespace + +void ObjectLockInfoManager::Impl::ExportObjectLockInfo(tserver::DdlLockEntriesPB* resp) { + VLOG(2) << __PRETTY_FUNCTION__; + { + LockGuard lock(mutex_); + for (const auto& [host_uuid, per_host_entry] : object_lock_infos_map_) { + auto l = per_host_entry->LockForRead(); + // TODO(Amit) Fetch and use the appropriate incarnation Id. 
+ auto sessions_map_it = l->pb.incarnations().find(kIncarnationId); + if (sessions_map_it == l->pb.incarnations().end()) { + continue; + } + for (const auto& [session_id, dbs_map] : sessions_map_it->second.sessions()) { + auto* lock_entries_pb = resp->add_lock_entries(); + lock_entries_pb->set_session_host_uuid(host_uuid); + lock_entries_pb->set_session_id(session_id); + ExportObjectLocksForSession(dbs_map, lock_entries_pb); + } + } + } + VLOG(3) << "Exported " << yb::ToString(*resp); +} + +void ObjectLockInfoManager::Impl::LockObject( + LeaderEpoch epoch, const AcquireObjectLockRequestPB* req, AcquireObjectLockResponsePB* resp, + rpc::RpcContext rpc) { + VLOG(3) << __PRETTY_FUNCTION__; + // TODO: Fix this. GetAllDescriptors may need to change to handle tserver membership reliably. + auto ts_descriptors = master_->ts_manager()->GetAllDescriptors(); + auto lock_objects = + std::make_shared>( + epoch, master_, catalog_manager_, this, std::move(ts_descriptors), *req, std::move(rpc)); + lock_objects->Launch(); +} + +void ObjectLockInfoManager::Impl::UnlockObject( + LeaderEpoch epoch, const ReleaseObjectLockRequestPB* req, ReleaseObjectLockResponsePB* resp, + rpc::RpcContext rpc) { + VLOG(3) << __PRETTY_FUNCTION__; + // TODO: Fix this. GetAllDescriptors may need to change to handle tserver membership reliably. + auto ts_descriptors = master_->ts_manager()->GetAllDescriptors(); + if (ts_descriptors.empty()) { + // TODO(Amit): Handle the case where the master receives a DDL lock/unlock request + // before all the TServers have registered + // (they may have registered with the older master-leader, thus serving DML reqs). + // Get rid of this check when we have done this. 
+ rpc.RespondRpcFailure( + rpc::ErrorStatusPB::ERROR_APPLICATION, + STATUS(IllegalState, "No TServers registered with the master yet.")); + return; + } + auto unlock_objects = + std::make_shared>( + epoch, master_, catalog_manager_, this, std::move(ts_descriptors), *req, std::move(rpc)); + unlock_objects->Launch(); +} + +bool ObjectLockInfoManager::Impl::InsertOrAssign( + const std::string& tserver_uuid, std::shared_ptr info) { + LockGuard lock(mutex_); + return object_lock_infos_map_.insert_or_assign(tserver_uuid, info).second; +} + +void ObjectLockInfoManager::Impl::Clear() { + LockGuard lock(mutex_); + object_lock_infos_map_.clear(); +} + +template +UpdateAllTServers::UpdateAllTServers( + LeaderEpoch epoch, Master* master, CatalogManager* catalog_manager, + ObjectLockInfoManager::Impl* olm, TSDescriptorVector&& ts_descriptors, const Req& req, + rpc::RpcContext rpc) + : epoch_(epoch), + master_(master), + catalog_manager_(catalog_manager), + object_lock_info_manager_(olm), + ts_descriptors_(std::move(ts_descriptors)), + statuses_(ts_descriptors_.size(), STATUS(Uninitialized, "")), + req_(req), + context_(std::move(rpc)) { + VLOG(3) << __PRETTY_FUNCTION__; +} + +template +void UpdateAllTServers::Launch() { + LaunchFrom(0); +} + +template +void UpdateAllTServers::Done(size_t i, const Status& s) { + statuses_[i] = s; + // TODO: There is a potential here for early return if s is not OK. 
+ if (--ts_pending_ == 0) { + CheckForDone(); + } +} + +template +void UpdateAllTServers::LaunchFrom(size_t start_idx) { + size_t num_descriptors = ts_descriptors_.size(); + ts_pending_ = num_descriptors - start_idx; + LOG(INFO) << __func__ << " launching for " << ts_pending_ << " tservers."; + for (size_t i = start_idx; i < num_descriptors; ++i) { + auto ts_uuid = ts_descriptors_[i]->permanent_uuid(); + LOG(INFO) << "Launching for " << ts_uuid; + auto callback = std::bind(&UpdateAllTServers::Done, this, i, std::placeholders::_1); + auto task = std::make_shared>( + master_, catalog_manager_->AsyncTaskPool(), ts_uuid, this->shared_from_this(), callback); + WARN_NOT_OK( + catalog_manager_->ScheduleTask(task), + yb::Format( + "Failed to schedule request to UpdateTServer to $0 for $1", ts_uuid, + request().DebugString())); + } +} + +template +void UpdateAllTServers::CheckForDone() { + for (const auto& status : statuses_) { + if (!status.ok()) { + LOG(INFO) << "Error in acquiring object lock: " << status; + // TBD: Release the taken locks. + // What is the best way to handle failures? Say one TServer errors out, + // Should we release the locks taken by the other TServers? What if one of the + // TServers had given a lock to the same session earlier -- would a release get + // rid of that lock as well? + // How to handle this during Release locks? + // + // What can cause failures here? + // - If a TServer goes down, how can we handle it? + context_.RespondRpcFailure(rpc::ErrorStatusPB::ERROR_APPLICATION, status); + return; + } + } + if (RelaunchIfNecessary()) { + return; + } + + LOG(INFO) << __PRETTY_FUNCTION__ << " is done. Updating sys catalog"; + // We are done. + // + // Update the master accordingly, after all the TServers. + // if master fails over, before all the TServers have responded, the tserver + // will need to retry the request at the new master-leader. 
+ auto status = object_lock_info_manager_->RequestDone(epoch_, req_); + if (status.ok()) { + context_.RespondSuccess(); + } else { + LOG(WARNING) << "Failed to update object lock " << status; + context_.RespondRpcFailure(rpc::ErrorStatusPB::ERROR_APPLICATION, status); + } +} + +template +bool UpdateAllTServers::RelaunchIfNecessary() { + auto old_size = ts_descriptors_.size(); + auto ts_descriptors = master_->ts_manager()->GetAllDescriptors(); + for (auto ts_descriptor : ts_descriptors) { + if (std::find(ts_descriptors_.begin(), ts_descriptors_.end(), ts_descriptor) == + ts_descriptors_.end()) { + ts_descriptors_.push_back(ts_descriptor); + statuses_.push_back(Status::OK()); + } + } + if (ts_descriptors.size() == old_size) { + return false; + } + + LOG(INFO) << "New TServers were added. Relaunching."; + LaunchFrom(old_size); + return true; +} + +template +UpdateTServer::UpdateTServer( + Master* master, ThreadPool* callback_pool, const TabletServerId& ts_uuid, + std::shared_ptr> shared_all_tservers, StdStatusCallback callback) + : RetrySpecificTSRpcTask(master, callback_pool, ts_uuid, /* async_task_throttler */ nullptr), + callback_(std::move(callback)), + shared_all_tservers_(shared_all_tservers) {} + +template <> +bool UpdateTServer::SendRequest( + int attempt) { + VLOG(3) << ToString() << __func__ << " attempt " << attempt; + ts_proxy_->AcquireObjectLocksAsync(request(), &resp_, &rpc_, BindRpcCallback()); + return true; +} + +template <> +bool UpdateTServer::SendRequest( + int attempt) { + VLOG(3) << ToString() << __func__ << " attempt " << attempt; + ts_proxy_->ReleaseObjectLocksAsync(request(), &resp_, &rpc_, BindRpcCallback()); + return true; +} + +template <> +std::string UpdateTServer::description() + const { + return Format("Acquire object lock for $0 at $1", request().DebugString(), permanent_uuid_); +} + +template <> +std::string UpdateTServer::description() + const { + return Format("Release object lock for $0 at $1", request().DebugString(), 
permanent_uuid_); +} + +template +void UpdateTServer::HandleResponse(int attempt) { + VLOG(3) << ToString() << __func__ << " response is " << yb::ToString(resp_); + Status status; + if (resp_.has_error()) { + status = StatusFromPB(resp_.error().status()); + TransitionToFailedState(server::MonitoredTaskState::kRunning, status); + } else { + TransitionToCompleteState(); + } +} + +template +void UpdateTServer::Finished(const Status& status) { + VLOG(3) << ToString() << __func__ << " (" << status << ")"; + callback_(status); +} + +} // namespace master +} // namespace yb diff --git a/src/yb/master/object_lock_info_manager.h b/src/yb/master/object_lock_info_manager.h new file mode 100644 index 000000000000..5118d0a293f6 --- /dev/null +++ b/src/yb/master/object_lock_info_manager.h @@ -0,0 +1,62 @@ +// Copyright (c) YugaByte, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software distributed under the License +// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express +// or implied. See the License for the specific language governing permissions and limitations +// under the License. 
+// + +#pragma once + +#include +#include +#include + +#include "yb/master/master_fwd.h" + +namespace yb::rpc { +class RpcContext; +} +namespace yb::tserver { +class AcquireObjectLockRequestPB; +class AcquireObjectLockResponsePB; +class ReleaseObjectLockRequestPB; +class ReleaseObjectLockResponsePB; +class DdlLockEntriesPB; +} // namespace yb::tserver + +namespace yb::master { + +struct LeaderEpoch; +class ObjectLockInfo; + +class ObjectLockInfoManager { + public: + ObjectLockInfoManager(Master* master, CatalogManager* catalog_manager); + virtual ~ObjectLockInfoManager(); + + void LockObject( + LeaderEpoch epoch, const tserver::AcquireObjectLockRequestPB* req, + tserver::AcquireObjectLockResponsePB* resp, rpc::RpcContext rpc); + + void UnlockObject( + LeaderEpoch epoch, const tserver::ReleaseObjectLockRequestPB* req, + tserver::ReleaseObjectLockResponsePB* resp, rpc::RpcContext rpc); + + void ExportObjectLockInfo(tserver::DdlLockEntriesPB* resp); + bool InsertOrAssign(const std::string& tserver_uuid, std::shared_ptr info); + void Clear(); + + private: + template + friend class UpdateAllTServers; + class Impl; + std::unique_ptr impl_; +}; + +} // namespace yb::master diff --git a/src/yb/master/sys_catalog.cc b/src/yb/master/sys_catalog.cc index 1577a4023dce..45612ca0b981 100644 --- a/src/yb/master/sys_catalog.cc +++ b/src/yb/master/sys_catalog.cc @@ -926,7 +926,7 @@ Status SysCatalogTable::ReadWithRestarts( do { if (read_restart_ht.is_valid()) { safe_ht_to_read = VERIFY_RESULT(tablet->SafeTime(require_lease, read_restart_ht)); - VLOG(0) << __func__ << " restarting read with ht = " << safe_ht_to_read + VLOG(3) << __func__ << " restarting read with ht = " << safe_ht_to_read << " >= " << read_restart_ht << ". 
Encountered read restart when reading at " << read_time.ToString(); read_time.read.MakeAtLeast(safe_ht_to_read); diff --git a/src/yb/master/sys_catalog_writer.cc b/src/yb/master/sys_catalog_writer.cc index aff235f0bcfb..9600911483e6 100644 --- a/src/yb/master/sys_catalog_writer.cc +++ b/src/yb/master/sys_catalog_writer.cc @@ -109,7 +109,6 @@ Status SysCatalogWriter::DoMutateItem( return Status::OK(); } - VLOG(2) << "Updating item " << item_id << " in catalog: " << diff; } return FillSysCatalogWriteRequest( diff --git a/src/yb/tserver/heartbeater.cc b/src/yb/tserver/heartbeater.cc index 813bc45558c7..3755cdd47df7 100644 --- a/src/yb/tserver/heartbeater.cc +++ b/src/yb/tserver/heartbeater.cc @@ -463,6 +463,11 @@ Status Heartbeater::Thread::TryHeartbeat() { auto universe_uuid = VERIFY_RESULT(UniverseUuid::FromString(resp.universe_uuid())); RETURN_NOT_OK(server_->ValidateAndMaybeSetUniverseUuid(universe_uuid)); } + if (resp.has_ddl_lock_entries()) { + WARN_NOT_OK( + server_->BootstrapDdlObjectLocks(resp), + "Error bootstrapping object locks. Not expected."); + } if (resp.has_error()) { switch (resp.error().code()) { diff --git a/src/yb/tserver/tablet_server.cc b/src/yb/tserver/tablet_server.cc index 10c5ddf901e9..4f1b57dba386 100644 --- a/src/yb/tserver/tablet_server.cc +++ b/src/yb/tserver/tablet_server.cc @@ -743,6 +743,14 @@ void TabletServer::Shutdown() { LOG(INFO) << "TabletServer shut down complete. 
Bye!"; } +Status TabletServer::BootstrapDdlObjectLocks(const master::TSHeartbeatResponsePB& heartbeat_resp) { + VLOG(2) << __func__; + if (!heartbeat_resp.has_ddl_lock_entries() || !ts_local_lock_manager_) { + return Status::OK(); + } + return ts_local_lock_manager_->BootstrapDdlObjectLocks(heartbeat_resp.ddl_lock_entries()); +} + Status TabletServer::PopulateLiveTServers(const master::TSHeartbeatResponsePB& heartbeat_resp) { std::lock_guard l(lock_); // We reset the list each time, since we want to keep the tservers that are live from the diff --git a/src/yb/tserver/tablet_server.h b/src/yb/tserver/tablet_server.h index 41fa6d288b28..6fba940366ef 100644 --- a/src/yb/tserver/tablet_server.h +++ b/src/yb/tserver/tablet_server.h @@ -191,6 +191,7 @@ class TabletServer : public DbServerBase, public TabletServerIf { } Status PopulateLiveTServers(const master::TSHeartbeatResponsePB& heartbeat_resp) EXCLUDES(lock_); + Status BootstrapDdlObjectLocks(const master::TSHeartbeatResponsePB& heartbeat_resp); Status GetLiveTServers( std::vector *live_tservers) const EXCLUDES(lock_) override; diff --git a/src/yb/tserver/ts_local_lock_manager-test.cc b/src/yb/tserver/ts_local_lock_manager-test.cc index 1374a118c6d5..c65847f0b037 100644 --- a/src/yb/tserver/ts_local_lock_manager-test.cc +++ b/src/yb/tserver/ts_local_lock_manager-test.cc @@ -34,7 +34,9 @@ constexpr auto kObject1 = 1; class TSLocalLockManagerTest : public YBTest { protected: - TSLocalLockManagerTest() = default; + TSLocalLockManagerTest() { + lm_.TEST_MarkBootstrapped(); + } tablet::TSLocalLockManager lm_; diff --git a/src/yb/tserver/ts_local_lock_manager.cc b/src/yb/tserver/ts_local_lock_manager.cc index a6329fae8bd3..47118ce7b257 100644 --- a/src/yb/tserver/ts_local_lock_manager.cc +++ b/src/yb/tserver/ts_local_lock_manager.cc @@ -18,8 +18,11 @@ #include "yb/docdb/docdb.h" #include "yb/docdb/docdb_fwd.h" #include "yb/docdb/shared_lock_manager.h" +#include "yb/util/backoff_waiter.h" +#include "yb/util/monotime.h" 
#include "yb/util/scope_exit.h" +using namespace std::literals; DECLARE_bool(dump_lock_keys); namespace yb::tablet { @@ -31,7 +34,12 @@ class TSLocalLockManager::Impl { ~Impl() = default; Status AcquireObjectLocks( - const tserver::AcquireObjectLockRequestPB& req, CoarseTimePoint deadline) { + const tserver::AcquireObjectLockRequestPB& req, CoarseTimePoint deadline, + WaitForBootstrap wait) { + if (wait) { + RETURN_NOT_OK( + Wait([this]() -> bool { return is_bootstrapped_; }, deadline, "Waiting to Bootstrap.")); + } // There should be atmost one outstanding request per session that is actively being processed // by the TSLocalLockManager. In context of table locks, either the pg backend or the pg client // service should be responsible for this behavior. Else this could lead to invalid lock state @@ -84,6 +92,14 @@ class TSLocalLockManager::Impl { return Status::OK(); } + void MarkBootstrapped() { + is_bootstrapped_ = true; + } + + bool IsBootstrapped() const { + return is_bootstrapped_; + } + size_t TEST_GrantedLocksSize() const { return object_lock_manager_.TEST_GrantedLocksSize(); } @@ -96,6 +112,27 @@ class TSLocalLockManager::Impl { object_lock_manager_.DumpStatusHtml(out); } + Status BootstrapDdlObjectLocks(const tserver::DdlLockEntriesPB& entries) { + VLOG(2) << __func__ << " using " << yb::ToString(entries.lock_entries()); + // TODO(amit): 1) When we implement YSQL leases, we need to clear out the locks, and + // re-bootstrap. For now, we are not doing that, the only time this should be happening + // is when a tserver registers with the master for the first time. + // 2) If the tserver is already bootstrapped from a master, we should not be bootstrapping + // again. However, even if we are bootstrap again, it should be safe to do so. Once we implement + // persistence of TServer Registration at the master, we can avoid this. + if (IsBootstrapped()) { + LOG_WITH_FUNC(INFO) << "TSLocalLockManager is already bootstrapped. 
Ignoring the request."; + return Status::OK(); + } + for (const auto& acquire_req : entries.lock_entries()) { + // This call should not block on anything. + CoarseTimePoint deadline = CoarseMonoClock::Now() + 1s; + RETURN_NOT_OK(AcquireObjectLocks(acquire_req, deadline, tablet::WaitForBootstrap::kFalse)); + } + MarkBootstrapped(); + return Status::OK(); + } + private: Status AddActiveSession(const docdb::SessionIDHostPair& session_pair) EXCLUDES(mutex_) { std::lock_guard lock(mutex_); @@ -119,6 +156,7 @@ class TSLocalLockManager::Impl { std::unordered_set> sessions_with_active_requests_ GUARDED_BY(mutex_); + std::atomic_bool is_bootstrapped_{false}; }; TSLocalLockManager::TSLocalLockManager() : impl_(new Impl()) {} @@ -126,8 +164,9 @@ TSLocalLockManager::TSLocalLockManager() : impl_(new Impl()) {} TSLocalLockManager::~TSLocalLockManager() {} Status TSLocalLockManager::AcquireObjectLocks( - const tserver::AcquireObjectLockRequestPB& req, CoarseTimePoint deadline) { - return impl_->AcquireObjectLocks(req, deadline); + const tserver::AcquireObjectLockRequestPB& req, CoarseTimePoint deadline, + WaitForBootstrap wait) { + return impl_->AcquireObjectLocks(req, deadline, wait); } Status TSLocalLockManager::ReleaseObjectLocks(const tserver::ReleaseObjectLockRequestPB& req) { @@ -146,4 +185,12 @@ size_t TSLocalLockManager::TEST_WaitingLocksSize() const { return impl_->TEST_WaitingLocksSize(); } +Status TSLocalLockManager::BootstrapDdlObjectLocks(const tserver::DdlLockEntriesPB& entries) { + return impl_->BootstrapDdlObjectLocks(entries); +} + +void TSLocalLockManager::TEST_MarkBootstrapped() { + impl_->MarkBootstrapped(); +} + } // namespace yb::tablet diff --git a/src/yb/tserver/ts_local_lock_manager.h b/src/yb/tserver/ts_local_lock_manager.h index d690027ea197..b754297cbb81 100644 --- a/src/yb/tserver/ts_local_lock_manager.h +++ b/src/yb/tserver/ts_local_lock_manager.h @@ -28,6 +28,8 @@ namespace yb::tablet { +YB_STRONGLY_TYPED_BOOL(WaitForBootstrap); + // LockManager for 
acquiring table/object locks of type TableLockType on a given object id. // TSLocalLockManager uses LockManagerImpl to acheive the locking/unlocking // behavior, yet the scope of the object lock is not just limited to the scope of the lock rpc @@ -68,7 +70,8 @@ class TSLocalLockManager { // // TODO: Augment the 'pg_locks' path to show the acquired/waiting object/table level locks. Status AcquireObjectLocks( - const tserver::AcquireObjectLockRequestPB& req, CoarseTimePoint deadline); + const tserver::AcquireObjectLockRequestPB& req, CoarseTimePoint deadline, + WaitForBootstrap wait = WaitForBootstrap::kTrue); // The call releases all locks on the object(s) corresponding to the session id-host pair. There // is no 1:1 mapping that exists among lock and unlock requests. A session can acquire different @@ -79,8 +82,11 @@ class TSLocalLockManager { Status ReleaseObjectLocks(const tserver::ReleaseObjectLockRequestPB& req); void DumpLocksToHtml(std::ostream& out); + Status BootstrapDdlObjectLocks(const tserver::DdlLockEntriesPB& resp); + size_t TEST_GrantedLocksSize() const; size_t TEST_WaitingLocksSize() const; + void TEST_MarkBootstrapped(); private: class Impl; diff --git a/src/yb/tserver/tserver.proto b/src/yb/tserver/tserver.proto index 82edce3abf77..560a76760ced 100644 --- a/src/yb/tserver/tserver.proto +++ b/src/yb/tserver/tserver.proto @@ -426,3 +426,7 @@ message ReleaseObjectLockRequestPB { message ReleaseObjectLockResponsePB { optional TabletServerErrorPB error = 1; } + +message DdlLockEntriesPB { + repeated AcquireObjectLockRequestPB lock_entries = 1; +} From d4103e809e0662c5cd4a51f9dfafb44ce210d38c Mon Sep 17 00:00:00 2001 From: Dmitry Uspenskiy <47734295+d-uspenskiy@users.noreply.github.com> Date: Wed, 18 Sep 2024 12:40:29 +0300 Subject: [PATCH 75/75] [#23513] YSQL: Fix broken org.yb.pgsql.TestYsqlMetrics#testExplainMaxMemory unit test Summary: Signature of the `YBCGetPgggateCurrentAllocatedBytes` function was changed in the 
https://phorge.dev.yugabyte.com/D37357 diff. But the usage of the function was not updated properly. Test Plan: Jenkins Reviewers: jason Reviewed By: jason Subscribers: yql Tags: #jenkins-ready Differential Revision: https://phorge.dev.yugabyte.com/D38120 --- src/postgres/src/backend/utils/mmgr/mcxt.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/postgres/src/backend/utils/mmgr/mcxt.c b/src/postgres/src/backend/utils/mmgr/mcxt.c index c308bd151a55..35b358f90fd2 100644 --- a/src/postgres/src/backend/utils/mmgr/mcxt.c +++ b/src/postgres/src/backend/utils/mmgr/mcxt.c @@ -61,9 +61,7 @@ static Size YbSnapshotMemory() { #if YB_TCMALLOC_ENABLED - int64_t cur_tc_actual_sz = 0; - YBCGetPgggateCurrentAllocatedBytes(&cur_tc_actual_sz); - return cur_tc_actual_sz; + return YBCGetPgggateCurrentAllocatedBytes(); #else return PgMemTracker.pg_cur_mem_bytes; #endif