diff --git a/.github/workflows/update_releases.yaml b/.github/workflows/update_releases.yaml
index 86427d57d0f7..cb65bf0eab4c 100644
--- a/.github/workflows/update_releases.yaml
+++ b/.github/workflows/update_releases.yaml
@@ -31,7 +31,6 @@ jobs:
- "release-23.2"
- "release-24.1"
- "release-24.2"
- - "release-24.3"
name: Update pkg/testutils/release/cockroach_releases.yaml on ${{ matrix.branch }}
runs-on: ubuntu-latest
steps:
diff --git a/DEPS.bzl b/DEPS.bzl
index b94847fa2ff4..ff369f2a4c80 100644
--- a/DEPS.bzl
+++ b/DEPS.bzl
@@ -1818,10 +1818,10 @@ def go_deps():
patches = [
"@com_github_cockroachdb_cockroach//build/patches:com_github_cockroachdb_pebble.patch",
],
- sha256 = "f6c51815ce324431298319df983bf15f03f6704d85eca3e876f239423a40cc77",
- strip_prefix = "github.com/cockroachdb/pebble@v0.0.0-20241024171322-faa0ea57dbc3",
+ sha256 = "73d1337f530367d024edd2016d6fc6eac589f8cafc6d82a2c452eb623ce01ad3",
+ strip_prefix = "github.com/cockroachdb/pebble@v0.0.0-20241028145347-405b4e1228d5",
urls = [
- "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/cockroachdb/pebble/com_github_cockroachdb_pebble-v0.0.0-20241024171322-faa0ea57dbc3.zip",
+ "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/cockroachdb/pebble/com_github_cockroachdb_pebble-v0.0.0-20241028145347-405b4e1228d5.zip",
],
)
go_repository(
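Bazel verifies the pinned `sha256` against the mirrored zip on every fetch, so the hash and URL must move in lockstep. A throwaway sketch (not part of this patch; the URL and hash are copied from the hunk above, everything else is illustrative) of checking the mirror out of band:

```go
// Hypothetical out-of-band verification of the mirrored module zip.
// Bazel performs the equivalent check itself when it downloads the archive.
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"log"
	"net/http"
)

func main() {
	const (
		url  = "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/cockroachdb/pebble/com_github_cockroachdb_pebble-v0.0.0-20241028145347-405b4e1228d5.zip"
		want = "73d1337f530367d024edd2016d6fc6eac589f8cafc6d82a2c452eb623ce01ad3"
	)
	resp, err := http.Get(url)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	h := sha256.New()
	if _, err := io.Copy(h, resp.Body); err != nil {
		log.Fatal(err)
	}
	if got := hex.EncodeToString(h.Sum(nil)); got != want {
		log.Fatalf("checksum mismatch: got %s want %s", got, want)
	}
	fmt.Println("mirror checksum OK")
}
```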
diff --git a/build/bazelutil/BUILD.bazel b/build/bazelutil/BUILD.bazel
index a04b0dafee40..9cc1e6a6445a 100644
--- a/build/bazelutil/BUILD.bazel
+++ b/build/bazelutil/BUILD.bazel
@@ -1,19 +1,7 @@
exports_files(["nogo_config.json"])
-load("@bazel_skylib//rules:analysis_test.bzl", "analysis_test")
load("@io_bazel_rules_go//go:def.bzl", "go_library")
-analysis_test(
- name = "test_nogo_configured",
- targets = select(
- {
- "//build/toolchains:nogo": [],
- "//build/toolchains:nonogo_explicit": [],
- },
- no_match_error = "must use exactly one of `--config lintonbuild` or `--config nolintonbuild` explicitly",
- ),
-)
-
# The output file will be empty unless we're using the force_build_cdeps config.
genrule(
name = "test_force_build_cdeps",
diff --git a/build/bazelutil/distdir_files.bzl b/build/bazelutil/distdir_files.bzl
index 5196a6d7e15f..ab0dca1982f3 100644
--- a/build/bazelutil/distdir_files.bzl
+++ b/build/bazelutil/distdir_files.bzl
@@ -345,7 +345,7 @@ DISTDIR_FILES = {
"https://storage.googleapis.com/cockroach-godeps/gomod/github.com/cockroachdb/gostdlib/com_github_cockroachdb_gostdlib-v1.19.0.zip": "c4d516bcfe8c07b6fc09b8a9a07a95065b36c2855627cb3514e40c98f872b69e",
"https://storage.googleapis.com/cockroach-godeps/gomod/github.com/cockroachdb/logtags/com_github_cockroachdb_logtags-v0.0.0-20230118201751-21c54148d20b.zip": "ca7776f47e5fecb4c495490a679036bfc29d95bd7625290cfdb9abb0baf97476",
"https://storage.googleapis.com/cockroach-godeps/gomod/github.com/cockroachdb/metamorphic/com_github_cockroachdb_metamorphic-v0.0.0-20231108215700-4ba948b56895.zip": "28c8cf42192951b69378cf537be5a9a43f2aeb35542908cc4fe5f689505853ea",
- "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/cockroachdb/pebble/com_github_cockroachdb_pebble-v0.0.0-20241024171322-faa0ea57dbc3.zip": "f6c51815ce324431298319df983bf15f03f6704d85eca3e876f239423a40cc77",
+ "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/cockroachdb/pebble/com_github_cockroachdb_pebble-v0.0.0-20241028145347-405b4e1228d5.zip": "73d1337f530367d024edd2016d6fc6eac589f8cafc6d82a2c452eb623ce01ad3",
"https://storage.googleapis.com/cockroach-godeps/gomod/github.com/cockroachdb/redact/com_github_cockroachdb_redact-v1.1.5.zip": "11b30528eb0dafc8bc1a5ba39d81277c257cbe6946a7564402f588357c164560",
"https://storage.googleapis.com/cockroach-godeps/gomod/github.com/cockroachdb/returncheck/com_github_cockroachdb_returncheck-v0.0.0-20200612231554-92cdbca611dd.zip": "ce92ba4352deec995b1f2eecf16eba7f5d51f5aa245a1c362dfe24c83d31f82b",
"https://storage.googleapis.com/cockroach-godeps/gomod/github.com/cockroachdb/stress/com_github_cockroachdb_stress-v0.0.0-20220803192808-1806698b1b7b.zip": "3fda531795c600daf25532a4f98be2a1335cd1e5e182c72789bca79f5f69fcc1",
diff --git a/build/bazelutil/nogo_config.json b/build/bazelutil/nogo_config.json
index a5a3316d1cee..c15a472ce358 100644
--- a/build/bazelutil/nogo_config.json
+++ b/build/bazelutil/nogo_config.json
@@ -1624,7 +1624,7 @@
"github.com/elastic/gosigar": "third-party",
"github.com/shirou/gopsutil": "third-party code",
"pkg/.*\\.eg\\.go$": "generated code",
- ".*\\.pb(_[0-9]+)?\\.go$": "generated code",
+ ".*\\.pb\\.go$": "generated code",
".*\\.pb\\.gw\\.go$": "generated code",
"pkg/.*_generated\\.go$": "generated code"
}
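The tightened pattern drops the optional `(_[0-9]+)?` arm, so numbered `.pb_N.go` files no longer fall under the generated-code exemption while plain `.pb.go` files still do. An illustrative check (the file names are made up):

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	oldPat := regexp.MustCompile(`.*\.pb(_[0-9]+)?\.go$`)
	newPat := regexp.MustCompile(`.*\.pb\.go$`)
	// Made-up file names, just to show the behavioral difference.
	for _, f := range []string{"pkg/roachpb/data.pb.go", "pkg/roachpb/data.pb_5.go"} {
		fmt.Printf("%-26s old=%v new=%v\n", f, oldPat.MatchString(f), newPat.MatchString(f))
	}
	// Output:
	// pkg/roachpb/data.pb.go     old=true new=true
	// pkg/roachpb/data.pb_5.go   old=true new=false
}
```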
diff --git a/build/teamcity/cockroach/nightlies/pebble_nightly_common.sh b/build/teamcity/cockroach/nightlies/pebble_nightly_common.sh
index cfd956aa1e16..760cd3366b16 100755
--- a/build/teamcity/cockroach/nightlies/pebble_nightly_common.sh
+++ b/build/teamcity/cockroach/nightlies/pebble_nightly_common.sh
@@ -39,10 +39,10 @@ cp $BAZEL_BIN/pkg/cmd/roachtest/roachtest_/roachtest bin
chmod a+w bin/roachtest
# Pull in the latest version of Pebble from upstream. The benchmarks run
-# against the tip of the 'master' branch. We do this by `go get`ting the
+# against the tip of the 'crl-release-24.3' branch. We do this by `go get`ting the
# latest version of the module, and then running `mirror` to update `DEPS.bzl`
# accordingly.
-bazel run @go_sdk//:bin/go get github.com/cockroachdb/pebble@master
+bazel run @go_sdk//:bin/go get github.com/cockroachdb/pebble@crl-release-24.3
NEW_DEPS_BZL_CONTENT=$(bazel run //pkg/cmd/mirror/go:mirror)
echo "$NEW_DEPS_BZL_CONTENT" > DEPS.bzl
bazel build @com_github_cockroachdb_pebble//cmd/pebble --config ci -c opt
diff --git a/build/teamcity/cockroach/nightlies/pebble_nightly_metamorphic.sh b/build/teamcity/cockroach/nightlies/pebble_nightly_metamorphic.sh
index 43fa1e6a191c..d765c4f38d80 100755
--- a/build/teamcity/cockroach/nightlies/pebble_nightly_metamorphic.sh
+++ b/build/teamcity/cockroach/nightlies/pebble_nightly_metamorphic.sh
@@ -19,10 +19,10 @@ source "$dir/teamcity-bazel-support.sh" # For run_bazel
mkdir -p artifacts
# Pull in the latest version of Pebble from upstream. The benchmarks run
-# against the tip of the 'master' branch. We do this by `go get`ting the
+# against the tip of the 'crl-release-24.3' branch. We do this by `go get`ting the
# latest version of the module, and then running `mirror` to update `DEPS.bzl`
# accordingly.
-bazel run @go_sdk//:bin/go get github.com/cockroachdb/pebble@master
+bazel run @go_sdk//:bin/go get github.com/cockroachdb/pebble@crl-release-24.3
# Just dump the diff to see what, if anything, has changed.
git diff
NEW_DEPS_BZL_CONTENT=$(bazel run //pkg/cmd/mirror/go:mirror)
diff --git a/build/teamcity/cockroach/nightlies/pebble_nightly_metamorphic_race.sh b/build/teamcity/cockroach/nightlies/pebble_nightly_metamorphic_race.sh
index 949d52ba2e68..262ffefee5da 100755
--- a/build/teamcity/cockroach/nightlies/pebble_nightly_metamorphic_race.sh
+++ b/build/teamcity/cockroach/nightlies/pebble_nightly_metamorphic_race.sh
@@ -19,10 +19,10 @@ source "$dir/teamcity-bazel-support.sh" # For run_bazel
mkdir -p artifacts
# Pull in the latest version of Pebble from upstream. The benchmarks run
-# against the tip of the 'master' branch. We do this by `go get`ting the
+# against the tip of the 'crl-release-24.3' branch. We do this by `go get`ting the
# latest version of the module, and then running `mirror` to update `DEPS.bzl`
# accordingly.
-bazel run @go_sdk//:bin/go get github.com/cockroachdb/pebble@master
+bazel run @go_sdk//:bin/go get github.com/cockroachdb/pebble@crl-release-24.3
# Just dump the diff to see what, if anything, has changed.
git diff
NEW_DEPS_BZL_CONTENT=$(bazel run //pkg/cmd/mirror/go:mirror)
diff --git a/build/teamcity/cockroach/nightlies/pebble_nightly_metamorphic_two_instance.sh b/build/teamcity/cockroach/nightlies/pebble_nightly_metamorphic_two_instance.sh
index 665851a340d7..3d14ea510446 100755
--- a/build/teamcity/cockroach/nightlies/pebble_nightly_metamorphic_two_instance.sh
+++ b/build/teamcity/cockroach/nightlies/pebble_nightly_metamorphic_two_instance.sh
@@ -19,10 +19,10 @@ source "$dir/teamcity-bazel-support.sh" # For run_bazel
mkdir -p artifacts
# Pull in the latest version of Pebble from upstream. The benchmarks run
-# against the tip of the 'master' branch. We do this by `go get`ting the
+# against the tip of the 'crl-release-24.3' branch. We do this by `go get`ting the
# latest version of the module, and then running `mirror` to update `DEPS.bzl`
# accordingly.
-bazel run @go_sdk//:bin/go get github.com/cockroachdb/pebble@master
+bazel run @go_sdk//:bin/go get github.com/cockroachdb/pebble@crl-release-24.3
# Just dump the diff to see what, if anything, has changed.
git diff
NEW_DEPS_BZL_CONTENT=$(bazel run //pkg/cmd/mirror/go:mirror)
diff --git a/build/teamcity/cockroach/nightlies/pebble_nightly_race_common.sh b/build/teamcity/cockroach/nightlies/pebble_nightly_race_common.sh
index ab23fb324e8f..e0f330feaf45 100755
--- a/build/teamcity/cockroach/nightlies/pebble_nightly_race_common.sh
+++ b/build/teamcity/cockroach/nightlies/pebble_nightly_race_common.sh
@@ -39,10 +39,10 @@ cp $BAZEL_BIN/pkg/cmd/roachtest/roachtest_/roachtest bin
chmod a+w bin/roachtest
# Pull in the latest version of Pebble from upstream. The benchmarks run
-# against the tip of the 'master' branch. We do this by `go get`ting the
+# against the tip of the 'crl-release-24.3' branch. We do this by `go get`ting the
# latest version of the module, and then running `mirror` to update `DEPS.bzl`
# accordingly.
-bazel run @go_sdk//:bin/go get github.com/cockroachdb/pebble@master
+bazel run @go_sdk//:bin/go get github.com/cockroachdb/pebble@crl-release-24.3
NEW_DEPS_BZL_CONTENT=$(bazel run //pkg/cmd/mirror/go:mirror)
echo "$NEW_DEPS_BZL_CONTENT" > DEPS.bzl
bazel build @com_github_cockroachdb_pebble//cmd/pebble --config race --config ci -c opt
diff --git a/build/teamcity/cockroach/nightlies/pebble_nightly_ycsb_race.sh b/build/teamcity/cockroach/nightlies/pebble_nightly_ycsb_race.sh
index 7a57608aca22..b1242cc262a9 100755
--- a/build/teamcity/cockroach/nightlies/pebble_nightly_ycsb_race.sh
+++ b/build/teamcity/cockroach/nightlies/pebble_nightly_ycsb_race.sh
@@ -8,7 +8,7 @@
#
# This script runs the Pebble Nightly YCSB A benchmark with the race flag.
# It is used to detect data races which may have been introduced to the latest
-# Pebble master branch.
+# Pebble crl-release-24.3 branch.
#
# It is run by the Pebble Nightly YCSB A race TeamCity build
# configuration.
diff --git a/cloud/kubernetes/bring-your-own-certs/client.yaml b/cloud/kubernetes/bring-your-own-certs/client.yaml
index 0a34277d992d..7009e93e57e8 100644
--- a/cloud/kubernetes/bring-your-own-certs/client.yaml
+++ b/cloud/kubernetes/bring-your-own-certs/client.yaml
@@ -20,7 +20,7 @@ spec:
serviceAccountName: cockroachdb
containers:
- name: cockroachdb-client
- image: cockroachdb/cockroach:v24.2.4
+ image: cockroachdb/cockroach:v24.2.3
# Keep a pod open indefinitely so kubectl exec can be used to get a shell to it
# and run cockroach client commands, such as cockroach sql, cockroach node status, etc.
command:
diff --git a/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml b/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml
index 16e05b5660a9..b10c23a4a9b6 100644
--- a/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml
+++ b/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml
@@ -153,7 +153,7 @@ spec:
topologyKey: kubernetes.io/hostname
containers:
- name: cockroachdb
- image: cockroachdb/cockroach:v24.2.4
+ image: cockroachdb/cockroach:v24.2.3
imagePullPolicy: IfNotPresent
# TODO: Change these to appropriate values for the hardware that you're running. You can see
# the resources that can be allocated on each of your Kubernetes nodes by running:
diff --git a/cloud/kubernetes/client-secure.yaml b/cloud/kubernetes/client-secure.yaml
index 6ae945a25471..1d419f5aadca 100644
--- a/cloud/kubernetes/client-secure.yaml
+++ b/cloud/kubernetes/client-secure.yaml
@@ -32,7 +32,7 @@ spec:
mountPath: /cockroach-certs
containers:
- name: cockroachdb-client
- image: cockroachdb/cockroach:v24.2.4
+ image: cockroachdb/cockroach:v24.2.3
imagePullPolicy: IfNotPresent
volumeMounts:
- name: client-certs
diff --git a/cloud/kubernetes/cluster-init-secure.yaml b/cloud/kubernetes/cluster-init-secure.yaml
index 3cf6867153c8..fce2b833d4be 100644
--- a/cloud/kubernetes/cluster-init-secure.yaml
+++ b/cloud/kubernetes/cluster-init-secure.yaml
@@ -34,7 +34,7 @@ spec:
mountPath: /cockroach-certs
containers:
- name: cluster-init
- image: cockroachdb/cockroach:v24.2.4
+ image: cockroachdb/cockroach:v24.2.3
imagePullPolicy: IfNotPresent
volumeMounts:
- name: client-certs
diff --git a/cloud/kubernetes/cluster-init.yaml b/cloud/kubernetes/cluster-init.yaml
index 4371f3d5a340..38ddd19e35c5 100644
--- a/cloud/kubernetes/cluster-init.yaml
+++ b/cloud/kubernetes/cluster-init.yaml
@@ -10,7 +10,7 @@ spec:
spec:
containers:
- name: cluster-init
- image: cockroachdb/cockroach:v24.2.4
+ image: cockroachdb/cockroach:v24.2.3
imagePullPolicy: IfNotPresent
command:
- "/cockroach/cockroach"
diff --git a/cloud/kubernetes/cockroachdb-statefulset-secure.yaml b/cloud/kubernetes/cockroachdb-statefulset-secure.yaml
index 3721c00b2d90..da43d6f63af1 100644
--- a/cloud/kubernetes/cockroachdb-statefulset-secure.yaml
+++ b/cloud/kubernetes/cockroachdb-statefulset-secure.yaml
@@ -195,7 +195,7 @@ spec:
topologyKey: kubernetes.io/hostname
containers:
- name: cockroachdb
- image: cockroachdb/cockroach:v24.2.4
+ image: cockroachdb/cockroach:v24.2.3
imagePullPolicy: IfNotPresent
# TODO: Change these to appropriate values for the hardware that you're running. You can see
# the resources that can be allocated on each of your Kubernetes nodes by running:
diff --git a/cloud/kubernetes/cockroachdb-statefulset.yaml b/cloud/kubernetes/cockroachdb-statefulset.yaml
index b633e5eccd9a..f7d84da30a15 100644
--- a/cloud/kubernetes/cockroachdb-statefulset.yaml
+++ b/cloud/kubernetes/cockroachdb-statefulset.yaml
@@ -98,7 +98,7 @@ spec:
topologyKey: kubernetes.io/hostname
containers:
- name: cockroachdb
- image: cockroachdb/cockroach:v24.2.4
+ image: cockroachdb/cockroach:v24.2.3
imagePullPolicy: IfNotPresent
# TODO: Change these to appropriate values for the hardware that you're running. You can see
# the resources that can be allocated on each of your Kubernetes nodes by running:
diff --git a/cloud/kubernetes/multiregion/client-secure.yaml b/cloud/kubernetes/multiregion/client-secure.yaml
index a8d92b65bc12..2877c8156af9 100644
--- a/cloud/kubernetes/multiregion/client-secure.yaml
+++ b/cloud/kubernetes/multiregion/client-secure.yaml
@@ -9,7 +9,7 @@ spec:
serviceAccountName: cockroachdb
containers:
- name: cockroachdb-client
- image: cockroachdb/cockroach:v24.2.4
+ image: cockroachdb/cockroach:v24.2.3
imagePullPolicy: IfNotPresent
volumeMounts:
- name: client-certs
diff --git a/cloud/kubernetes/multiregion/cluster-init-secure.yaml b/cloud/kubernetes/multiregion/cluster-init-secure.yaml
index 6abcc5289c70..aec679ba0eba 100644
--- a/cloud/kubernetes/multiregion/cluster-init-secure.yaml
+++ b/cloud/kubernetes/multiregion/cluster-init-secure.yaml
@@ -11,7 +11,7 @@ spec:
serviceAccountName: cockroachdb
containers:
- name: cluster-init
- image: cockroachdb/cockroach:v24.2.4
+ image: cockroachdb/cockroach:v24.2.3
imagePullPolicy: IfNotPresent
volumeMounts:
- name: client-certs
diff --git a/cloud/kubernetes/multiregion/cockroachdb-statefulset-secure.yaml b/cloud/kubernetes/multiregion/cockroachdb-statefulset-secure.yaml
index faf761c3f564..a8441f5344b7 100644
--- a/cloud/kubernetes/multiregion/cockroachdb-statefulset-secure.yaml
+++ b/cloud/kubernetes/multiregion/cockroachdb-statefulset-secure.yaml
@@ -167,7 +167,7 @@ spec:
topologyKey: kubernetes.io/hostname
containers:
- name: cockroachdb
- image: cockroachdb/cockroach:v24.2.4
+ image: cockroachdb/cockroach:v24.2.3
imagePullPolicy: IfNotPresent
ports:
- containerPort: 26257
diff --git a/cloud/kubernetes/multiregion/eks/cockroachdb-statefulset-secure-eks.yaml b/cloud/kubernetes/multiregion/eks/cockroachdb-statefulset-secure-eks.yaml
index bfc440aa7d14..37af4c6c3063 100644
--- a/cloud/kubernetes/multiregion/eks/cockroachdb-statefulset-secure-eks.yaml
+++ b/cloud/kubernetes/multiregion/eks/cockroachdb-statefulset-secure-eks.yaml
@@ -185,7 +185,7 @@ spec:
name: cockroach-env
containers:
- name: cockroachdb
- image: cockroachdb/cockroach:v24.2.4
+ image: cockroachdb/cockroach:v24.2.3
imagePullPolicy: IfNotPresent
# TODO: Change these to appropriate values for the hardware that you're running. You can see
# the resources that can be allocated on each of your Kubernetes nodes by running:
diff --git a/cloud/kubernetes/performance/cockroachdb-daemonset-insecure.yaml b/cloud/kubernetes/performance/cockroachdb-daemonset-insecure.yaml
index 25e1eaac25ca..43304b8e148e 100644
--- a/cloud/kubernetes/performance/cockroachdb-daemonset-insecure.yaml
+++ b/cloud/kubernetes/performance/cockroachdb-daemonset-insecure.yaml
@@ -82,7 +82,7 @@ spec:
hostNetwork: true
containers:
- name: cockroachdb
- image: cockroachdb/cockroach:v24.2.4
+ image: cockroachdb/cockroach:v24.2.3
imagePullPolicy: IfNotPresent
# TODO: If you configured taints to give CockroachDB exclusive access to nodes, feel free
# to remove the requests and limits sections. If you didn't, you'll need to change these to
diff --git a/cloud/kubernetes/performance/cockroachdb-daemonset-secure.yaml b/cloud/kubernetes/performance/cockroachdb-daemonset-secure.yaml
index d655a7c32aba..ce66776e4605 100644
--- a/cloud/kubernetes/performance/cockroachdb-daemonset-secure.yaml
+++ b/cloud/kubernetes/performance/cockroachdb-daemonset-secure.yaml
@@ -198,7 +198,7 @@ spec:
topologyKey: kubernetes.io/hostname
containers:
- name: cockroachdb
- image: cockroachdb/cockroach:v24.2.4
+ image: cockroachdb/cockroach:v24.2.3
imagePullPolicy: IfNotPresent
# TODO: If you configured taints to give CockroachDB exclusive access to nodes, feel free
# to remove the requests and limits sections. If you didn't, you'll need to change these to
diff --git a/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml b/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml
index cb6fbff5bea7..f52108673a7a 100644
--- a/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml
+++ b/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml
@@ -141,7 +141,7 @@ spec:
- name: cockroachdb
# NOTE: Always use the most recent version of CockroachDB for the best
# performance and reliability.
- image: cockroachdb/cockroach:v24.2.4
+ image: cockroachdb/cockroach:v24.2.3
imagePullPolicy: IfNotPresent
# TODO: Change these to appropriate values for the hardware that you're running. You can see
# the resources that can be allocated on each of your Kubernetes nodes by running:
diff --git a/cloud/kubernetes/performance/cockroachdb-statefulset-secure.yaml b/cloud/kubernetes/performance/cockroachdb-statefulset-secure.yaml
index eb5ad0e477a2..6c5fdba96999 100644
--- a/cloud/kubernetes/performance/cockroachdb-statefulset-secure.yaml
+++ b/cloud/kubernetes/performance/cockroachdb-statefulset-secure.yaml
@@ -232,7 +232,7 @@ spec:
- name: cockroachdb
# NOTE: Always use the most recent version of CockroachDB for the best
# performance and reliability.
- image: cockroachdb/cockroach:v24.2.4
+ image: cockroachdb/cockroach:v24.2.3
imagePullPolicy: IfNotPresent
# TODO: Change these to appropriate values for the hardware that you're running. You can see
# the resources that can be allocated on each of your Kubernetes nodes by running:
diff --git a/cloud/kubernetes/v1.6/client-secure.yaml b/cloud/kubernetes/v1.6/client-secure.yaml
index a9d63167acd5..135234c1030c 100644
--- a/cloud/kubernetes/v1.6/client-secure.yaml
+++ b/cloud/kubernetes/v1.6/client-secure.yaml
@@ -32,7 +32,7 @@ spec:
mountPath: /cockroach-certs
containers:
- name: cockroachdb-client
- image: cockroachdb/cockroach:v24.2.4
+ image: cockroachdb/cockroach:v24.2.3
imagePullPolicy: IfNotPresent
volumeMounts:
- name: client-certs
diff --git a/cloud/kubernetes/v1.6/cluster-init-secure.yaml b/cloud/kubernetes/v1.6/cluster-init-secure.yaml
index 5842b451df71..fc027edff123 100644
--- a/cloud/kubernetes/v1.6/cluster-init-secure.yaml
+++ b/cloud/kubernetes/v1.6/cluster-init-secure.yaml
@@ -34,7 +34,7 @@ spec:
mountPath: /cockroach-certs
containers:
- name: cluster-init
- image: cockroachdb/cockroach:v24.2.4
+ image: cockroachdb/cockroach:v24.2.3
imagePullPolicy: IfNotPresent
volumeMounts:
- name: client-certs
diff --git a/cloud/kubernetes/v1.6/cluster-init.yaml b/cloud/kubernetes/v1.6/cluster-init.yaml
index d367fb051546..207bf4b62cf1 100644
--- a/cloud/kubernetes/v1.6/cluster-init.yaml
+++ b/cloud/kubernetes/v1.6/cluster-init.yaml
@@ -10,7 +10,7 @@ spec:
spec:
containers:
- name: cluster-init
- image: cockroachdb/cockroach:v24.2.4
+ image: cockroachdb/cockroach:v24.2.3
imagePullPolicy: IfNotPresent
command:
- "/cockroach/cockroach"
diff --git a/cloud/kubernetes/v1.6/cockroachdb-statefulset-secure.yaml b/cloud/kubernetes/v1.6/cockroachdb-statefulset-secure.yaml
index ef0ce8eb516c..e793e5f85f27 100644
--- a/cloud/kubernetes/v1.6/cockroachdb-statefulset-secure.yaml
+++ b/cloud/kubernetes/v1.6/cockroachdb-statefulset-secure.yaml
@@ -178,7 +178,7 @@ spec:
topologyKey: kubernetes.io/hostname
containers:
- name: cockroachdb
- image: cockroachdb/cockroach:v24.2.4
+ image: cockroachdb/cockroach:v24.2.3
imagePullPolicy: IfNotPresent
ports:
- containerPort: 26257
diff --git a/cloud/kubernetes/v1.6/cockroachdb-statefulset.yaml b/cloud/kubernetes/v1.6/cockroachdb-statefulset.yaml
index b8b3e23a07c3..6c1b027674d7 100644
--- a/cloud/kubernetes/v1.6/cockroachdb-statefulset.yaml
+++ b/cloud/kubernetes/v1.6/cockroachdb-statefulset.yaml
@@ -81,7 +81,7 @@ spec:
topologyKey: kubernetes.io/hostname
containers:
- name: cockroachdb
- image: cockroachdb/cockroach:v24.2.4
+ image: cockroachdb/cockroach:v24.2.3
imagePullPolicy: IfNotPresent
ports:
- containerPort: 26257
diff --git a/cloud/kubernetes/v1.7/client-secure.yaml b/cloud/kubernetes/v1.7/client-secure.yaml
index 17c71ed3f052..f6a20c92eac1 100644
--- a/cloud/kubernetes/v1.7/client-secure.yaml
+++ b/cloud/kubernetes/v1.7/client-secure.yaml
@@ -32,7 +32,7 @@ spec:
mountPath: /cockroach-certs
containers:
- name: cockroachdb-client
- image: cockroachdb/cockroach:v24.2.4
+ image: cockroachdb/cockroach:v24.2.3
imagePullPolicy: IfNotPresent
volumeMounts:
- name: client-certs
diff --git a/cloud/kubernetes/v1.7/cluster-init-secure.yaml b/cloud/kubernetes/v1.7/cluster-init-secure.yaml
index 3c6832faf9f0..cc4a898efdc1 100644
--- a/cloud/kubernetes/v1.7/cluster-init-secure.yaml
+++ b/cloud/kubernetes/v1.7/cluster-init-secure.yaml
@@ -34,7 +34,7 @@ spec:
mountPath: /cockroach-certs
containers:
- name: cluster-init
- image: cockroachdb/cockroach:v24.2.4
+ image: cockroachdb/cockroach:v24.2.3
imagePullPolicy: IfNotPresent
volumeMounts:
- name: client-certs
diff --git a/cloud/kubernetes/v1.7/cluster-init.yaml b/cloud/kubernetes/v1.7/cluster-init.yaml
index 1a1b05cfb145..3a11c5212288 100644
--- a/cloud/kubernetes/v1.7/cluster-init.yaml
+++ b/cloud/kubernetes/v1.7/cluster-init.yaml
@@ -10,7 +10,7 @@ spec:
spec:
containers:
- name: cluster-init
- image: cockroachdb/cockroach:v24.2.4
+ image: cockroachdb/cockroach:v24.2.3
imagePullPolicy: IfNotPresent
command:
- "/cockroach/cockroach"
diff --git a/cloud/kubernetes/v1.7/cockroachdb-statefulset-secure.yaml b/cloud/kubernetes/v1.7/cockroachdb-statefulset-secure.yaml
index fbf690bb1da2..0511269820c6 100644
--- a/cloud/kubernetes/v1.7/cockroachdb-statefulset-secure.yaml
+++ b/cloud/kubernetes/v1.7/cockroachdb-statefulset-secure.yaml
@@ -190,7 +190,7 @@ spec:
topologyKey: kubernetes.io/hostname
containers:
- name: cockroachdb
- image: cockroachdb/cockroach:v24.2.4
+ image: cockroachdb/cockroach:v24.2.3
imagePullPolicy: IfNotPresent
ports:
- containerPort: 26257
diff --git a/cloud/kubernetes/v1.7/cockroachdb-statefulset.yaml b/cloud/kubernetes/v1.7/cockroachdb-statefulset.yaml
index 3077bf5617a5..1bfeca7c106f 100644
--- a/cloud/kubernetes/v1.7/cockroachdb-statefulset.yaml
+++ b/cloud/kubernetes/v1.7/cockroachdb-statefulset.yaml
@@ -93,7 +93,7 @@ spec:
topologyKey: kubernetes.io/hostname
containers:
- name: cockroachdb
- image: cockroachdb/cockroach:v24.2.4
+ image: cockroachdb/cockroach:v24.2.3
imagePullPolicy: IfNotPresent
ports:
- containerPort: 26257
diff --git a/docs/generated/metrics/metrics.html b/docs/generated/metrics/metrics.html
index fe659599b919..aa039becce99 100644
--- a/docs/generated/metrics/metrics.html
+++ b/docs/generated/metrics/metrics.html
@@ -1524,9 +1524,9 @@
APPLICATION obs.tablemetadata.update_job.table_updates The total number of rows that have been updated in system.table_metadata Rows Updated COUNTER COUNT AVG NON_NEGATIVE_DERIVATIVE
APPLICATION physical_replication.admit_latency Event admission latency: a difference between event MVCC timestamp and the time it was admitted into ingestion processor Nanoseconds HISTOGRAM NANOSECONDS AVG NONE
APPLICATION physical_replication.commit_latency Event commit latency: a difference between event MVCC timestamp and the time it was flushed into disk. If we batch events, then the difference between the oldest event in the batch and flush is recorded Nanoseconds HISTOGRAM NANOSECONDS AVG NONE
-APPLICATION physical_replication.cutover_progress The number of ranges left to revert in order to complete an inflight cutover Ranges GAUGE COUNT AVG NONE
APPLICATION physical_replication.distsql_replan_count Total number of dist sql replanning events Events COUNTER COUNT AVG NON_NEGATIVE_DERIVATIVE
APPLICATION physical_replication.events_ingested Events ingested by all replication jobs Events COUNTER COUNT AVG NON_NEGATIVE_DERIVATIVE
+APPLICATION physical_replication.failover_progress The number of ranges left to revert in order to complete an inflight cutover Ranges GAUGE COUNT AVG NONE
APPLICATION physical_replication.flush_hist_nanos Time spent flushing messages across all replication streams Nanoseconds HISTOGRAM NANOSECONDS AVG NONE
APPLICATION physical_replication.flushes Total flushes across all replication jobs Flushes COUNTER COUNT AVG NON_NEGATIVE_DERIVATIVE
APPLICATION physical_replication.logical_bytes Logical bytes (sum of keys + values) ingested by all replication jobs Bytes COUNTER BYTES AVG NON_NEGATIVE_DERIVATIVE
diff --git a/docs/generated/settings/settings-for-tenants.txt b/docs/generated/settings/settings-for-tenants.txt
index dfc26a6b495b..6a16230971f1 100644
--- a/docs/generated/settings/settings-for-tenants.txt
+++ b/docs/generated/settings/settings-for-tenants.txt
@@ -385,7 +385,6 @@ sql.ttl.job.enabled boolean true whether the TTL job is enabled application
sql.txn.read_committed_isolation.enabled boolean true set to true to allow transactions to use the READ COMMITTED isolation level if specified by BEGIN/SET commands application
sql.txn.repeatable_read_isolation.enabled (alias: sql.txn.snapshot_isolation.enabled) boolean false set to true to allow transactions to use the REPEATABLE READ isolation level if specified by BEGIN/SET commands application
sql.txn_fingerprint_id_cache.capacity integer 100 the maximum number of txn fingerprint IDs stored application
-storage.columnar_blocks.enabled boolean false set to true to enable columnar-blocks to store KVs in a columnar format system-visible
storage.delete_compaction_excise.enabled boolean true set to false to direct Pebble to not partially excise sstables in delete-only compactions system-visible
storage.ingestion.value_blocks.enabled boolean true set to true to enable writing of value blocks in ingestion sstables application
storage.max_sync_duration duration 20s maximum duration for disk operations; any operations that take longer than this setting trigger a warning log entry or process crash system-visible
@@ -401,4 +400,4 @@ trace.snapshot.rate duration 0s if non-zero, interval at which background trace
trace.span_registry.enabled boolean true if set, ongoing traces can be seen at https://<ui>/#/debug/tracez application
trace.zipkin.collector string the address of a Zipkin instance to receive traces, as <host>:<port>. If no port is specified, 9411 will be used. application
ui.display_timezone enumeration etc/utc the timezone used to format timestamps in the ui [etc/utc = 0, america/new_york = 1] application
-version version 1000024.2-upgrading-to-1000024.3-step-022 set the active cluster version in the format '<major>.<minor>' application
+version version 24.2-upgrading-to-24.3-step-022 set the active cluster version in the format '<major>.<minor>' application
diff --git a/docs/generated/settings/settings.html b/docs/generated/settings/settings.html
index d99b741a3fd9..3218909ff771 100644
--- a/docs/generated/settings/settings.html
+++ b/docs/generated/settings/settings.html
@@ -339,7 +339,6 @@
sql.txn.read_committed_isolation.enabled boolean true set to true to allow transactions to use the READ COMMITTED isolation level if specified by BEGIN/SET commands Serverless/Dedicated/Self-Hosted
sql.txn.repeatable_read_isolation.enabled (alias: sql.txn.snapshot_isolation.enabled) boolean false set to true to allow transactions to use the REPEATABLE READ isolation level if specified by BEGIN/SET commands Serverless/Dedicated/Self-Hosted
sql.txn_fingerprint_id_cache.capacity integer 100 the maximum number of txn fingerprint IDs stored Serverless/Dedicated/Self-Hosted
-storage.columnar_blocks.enabled boolean false set to true to enable columnar-blocks to store KVs in a columnar format Dedicated/Self-hosted (read-write); Serverless (read-only)
storage.delete_compaction_excise.enabled boolean true set to false to direct Pebble to not partially excise sstables in delete-only compactions Dedicated/Self-hosted (read-write); Serverless (read-only)
storage.experimental.eventually_file_only_snapshots.enabled boolean true set to false to disable eventually-file-only-snapshots (kv.snapshot_receiver.excise.enabled must also be false) Dedicated/Self-Hosted
storage.ingest_split.enabled boolean true set to false to disable ingest-time splitting that lowers write-amplification Dedicated/Self-Hosted
@@ -359,6 +358,6 @@
trace.span_registry.enabled boolean true if set, ongoing traces can be seen at https://<ui>/#/debug/tracez Serverless/Dedicated/Self-Hosted
trace.zipkin.collector string the address of a Zipkin instance to receive traces, as <host>:<port>. If no port is specified, 9411 will be used. Serverless/Dedicated/Self-Hosted
ui.display_timezone enumeration etc/utc the timezone used to format timestamps in the ui [etc/utc = 0, america/new_york = 1] Serverless/Dedicated/Self-Hosted
-version version 1000024.2-upgrading-to-1000024.3-step-022 set the active cluster version in the format '<major>.<minor>' Serverless/Dedicated/Self-Hosted
+version version 24.2-upgrading-to-24.3-step-022 set the active cluster version in the format '<major>.<minor>' Serverless/Dedicated/Self-Hosted
diff --git a/go.mod b/go.mod
index 56c3a750c61a..93f78fa9896a 100644
--- a/go.mod
+++ b/go.mod
@@ -135,7 +135,7 @@ require (
github.com/cockroachdb/go-test-teamcity v0.0.0-20191211140407-cff980ad0a55
github.com/cockroachdb/gostdlib v1.19.0
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b
- github.com/cockroachdb/pebble v0.0.0-20241024171322-faa0ea57dbc3
+ github.com/cockroachdb/pebble v0.0.0-20241028145347-405b4e1228d5
github.com/cockroachdb/redact v1.1.5
github.com/cockroachdb/returncheck v0.0.0-20200612231554-92cdbca611dd
github.com/cockroachdb/stress v0.0.0-20220803192808-1806698b1b7b
diff --git a/go.sum b/go.sum
index e6e57f37d316..3067ebbd6508 100644
--- a/go.sum
+++ b/go.sum
@@ -536,8 +536,8 @@ github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZe
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs=
github.com/cockroachdb/metamorphic v0.0.0-20231108215700-4ba948b56895 h1:XANOgPYtvELQ/h4IrmPAohXqe2pWA8Bwhejr3VQoZsA=
github.com/cockroachdb/metamorphic v0.0.0-20231108215700-4ba948b56895/go.mod h1:aPd7gM9ov9M8v32Yy5NJrDyOcD8z642dqs+F0CeNXfA=
-github.com/cockroachdb/pebble v0.0.0-20241024171322-faa0ea57dbc3 h1:NI4atafM0PjZeIB8XPZzK8nQ5/t1XcBgC/2r2LxqHSs=
-github.com/cockroachdb/pebble v0.0.0-20241024171322-faa0ea57dbc3/go.mod h1:XmS8uVDd9YFw/1R7J0J/CmTUANwT7iGnBRxH9AyDA90=
+github.com/cockroachdb/pebble v0.0.0-20241028145347-405b4e1228d5 h1:bd63AydJbSsdi3ZyM9bg56UQcwgw2DaajeGMtWGXedw=
+github.com/cockroachdb/pebble v0.0.0-20241028145347-405b4e1228d5/go.mod h1:XmS8uVDd9YFw/1R7J0J/CmTUANwT7iGnBRxH9AyDA90=
github.com/cockroachdb/redact v1.1.3/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg=
github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30=
github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg=
diff --git a/pkg/BUILD.bazel b/pkg/BUILD.bazel
index d45845c8507f..7b33745b9fd5 100644
--- a/pkg/BUILD.bazel
+++ b/pkg/BUILD.bazel
@@ -578,7 +578,6 @@ ALL_TESTS = [
"//pkg/sql/schemachanger/scplan/internal/rules/current:current_test",
"//pkg/sql/schemachanger/scplan/internal/rules/release_24_1:release_24_1_test",
"//pkg/sql/schemachanger/scplan/internal/rules/release_24_2:release_24_2_test",
- "//pkg/sql/schemachanger/scplan/internal/rules/release_24_3:release_24_3_test",
"//pkg/sql/schemachanger/scplan/internal/scgraph:scgraph_test",
"//pkg/sql/schemachanger/scplan:scplan_test",
"//pkg/sql/schemachanger/screl:screl_test",
@@ -729,7 +728,6 @@ ALL_TESTS = [
"//pkg/util/mon:mon_test",
"//pkg/util/netutil/addr:addr_test",
"//pkg/util/netutil:netutil_test",
- "//pkg/util/num32:num32_test",
"//pkg/util/optional:optional_test",
"//pkg/util/parquet:parquet_test",
"//pkg/util/pprofutil:pprofutil_test",
@@ -2164,8 +2162,6 @@ GO_TARGETS = [
"//pkg/sql/schemachanger/scplan/internal/rules/release_24_1:release_24_1_test",
"//pkg/sql/schemachanger/scplan/internal/rules/release_24_2:release_24_2",
"//pkg/sql/schemachanger/scplan/internal/rules/release_24_2:release_24_2_test",
- "//pkg/sql/schemachanger/scplan/internal/rules/release_24_3:release_24_3",
- "//pkg/sql/schemachanger/scplan/internal/rules/release_24_3:release_24_3_test",
"//pkg/sql/schemachanger/scplan/internal/rules:rules",
"//pkg/sql/schemachanger/scplan/internal/scgraph:scgraph",
"//pkg/sql/schemachanger/scplan/internal/scgraph:scgraph_test",
@@ -2549,8 +2545,6 @@ GO_TARGETS = [
"//pkg/util/netutil/addr:addr_test",
"//pkg/util/netutil:netutil",
"//pkg/util/netutil:netutil_test",
- "//pkg/util/num32:num32",
- "//pkg/util/num32:num32_test",
"//pkg/util/optional:optional",
"//pkg/util/optional:optional_test",
"//pkg/util/parquet:parquet",
diff --git a/pkg/acceptance/compose/gss/build-push-gss.sh b/pkg/acceptance/compose/gss/build-push-gss.sh
index 75103c284ff4..a62ec0096404 100755
--- a/pkg/acceptance/compose/gss/build-push-gss.sh
+++ b/pkg/acceptance/compose/gss/build-push-gss.sh
@@ -9,10 +9,5 @@ set -xeuo pipefail
TARGET=$1
TAG=$(date +%Y%m%d-%H%M%S)
-if which podman; then
- podman build --platform linux/amd64,linux/arm64 --manifest us-east1-docker.pkg.dev/crl-ci-images/cockroach/acceptance-gss-$TARGET:$TAG ./$TARGET
- podman manifest push us-east1-docker.pkg.dev/crl-ci-images/cockroach/acceptance-gss-$TARGET:$TAG us-east1-docker.pkg.dev/crl-ci-images/cockroach/acceptance-gss-$TARGET:$TAG
-else
- docker buildx create --use
- docker buildx build --push --platform linux/amd64,linux/arm64 -t us-east1-docker.pkg.dev/crl-ci-images/cockroach/acceptance-gss-$TARGET:$TAG ./$TARGET
-fi
+docker buildx create --use
+docker buildx build --push --platform linux/amd64,linux/arm64 -t us-east1-docker.pkg.dev/crl-ci-images/cockroach/acceptance-gss-$TARGET:$TAG ./$TARGET
diff --git a/pkg/base/config.go b/pkg/base/config.go
index b6a784f34a2b..e26d09c1846c 100644
--- a/pkg/base/config.go
+++ b/pkg/base/config.go
@@ -166,18 +166,33 @@ var (
// after heartbeating then the timeout fires after 2s of unavailability and
// the offset is -1s).
//
- // Raft election:
- // - Heartbeat offset (0-1 heartbeat interval) [-1.00s - 0.00s]
- // - Election timeout (random 1x-2x timeout) [ 2.00s - 4.00s]
- // - Election (3x RTT: prevote, vote, append) [ 0.03s - 1.20s]
- // Total latency [ 1.03s - 5.20s]
+ // Raft election (fortification disabled):
+ // - Heartbeat offset (0-1 heartbeat interval) [-1.00s - 0.00s]
+ // - Election timeout (random 1x-2x timeout) [ 2.00s - 4.00s]
+ // - Election (3x RTT: prevote, vote, append) [ 0.03s - 1.20s]
+ // Total latency [ 1.03s - 5.20s]
//
- // Lease acquisition:
- // - Heartbeat offset (0-1 heartbeat interval) [-3.00s - 0.00s]
- // - Lease expiration (constant) [ 6.00s - 6.00s]
- // - Liveness epoch bump (2x RTT: CPut + append) [ 0.02s - 0.80s]
- // - Lease acquisition (1x RTT: append) [ 0.01s - 0.40s]
- // Total latency [ 3.03s - 7.20s]
+ // Expiration lease acquisition:
+ // - Heartbeat offset (0-1 heartbeat interval) [-3.00s - 0.00s]
+ // - Lease expiration (constant) [ 6.00s - 6.00s]
+ // - Lease acquisition (1x RTT: append) [ 0.01s - 0.40s]
+ // Total latency [ 3.01s - 6.40s]
+ //
+ // Epoch lease acquisition:
+ // - Node Liveness heartbeat offset (0-1 heartbeat interval) [-3.00s - 0.00s]
+ // - Node Liveness record expiration (constant) [ 6.00s - 6.00s]
+ // - Node Liveness epoch bump (2x RTT: CPut + append) [ 0.02s - 0.80s]
+ // - Lease acquisition (1x RTT: append) [ 0.01s - 0.40s]
+ // Total latency [ 3.03s - 7.20s]
+ //
+ // Leader lease acquisition (including raft election):
+ // - Store Liveness heartbeat offset (0-1 heartbeat interval) [-3.00s - 0.00s]
+ // - Store Liveness expiration (constant) [ 6.00s - 6.00s]
+ // - Store Liveness withdrawal (0-1 withdrawal interval) [ 0.00s - 0.10s]
+ // - Raft election timeout jitter (random 0x-1x timeout) [ 0.00s - 2.00s]
+ // - Election (3x RTT: prevote, vote, append) [ 0.03s - 1.20s]
+ // - Lease acquisition (1x RTT: append) [ 0.01s - 0.40s]
+ // Total latency [ 3.04s - 9.70s]
//
// (generated by TestDefaultRaftConfig)
//
@@ -220,7 +235,7 @@ var (
// defaultRaftHeartbeatIntervalTicks is the default value for
// RaftHeartbeatIntervalTicks, which determines the number of ticks between
// each heartbeat.
- defaultRaftHeartbeatIntervalTicks = envutil.EnvOrDefaultInt64(
+ defaultRaftHeartbeatIntervalTicks = envutil.EnvOrDefaultInt(
"COCKROACH_RAFT_HEARTBEAT_INTERVAL_TICKS", 2)
// defaultRaftElectionTimeoutTicks specifies the minimum number of Raft ticks
@@ -233,12 +248,12 @@ var (
// sufficient for a full network roundtrip. Raft heartbeats are also sent via
// SystemClass, avoiding head-of-line blocking by general RPC traffic. The 1-2
// random factor provides an additional buffer.
- defaultRaftElectionTimeoutTicks = envutil.EnvOrDefaultInt64(
+ defaultRaftElectionTimeoutTicks = envutil.EnvOrDefaultInt(
"COCKROACH_RAFT_ELECTION_TIMEOUT_TICKS", 4)
// defaultRaftReproposalTimeoutTicks is the number of ticks before reproposing
// a Raft command.
- defaultRaftReproposalTimeoutTicks = envutil.EnvOrDefaultInt64(
+ defaultRaftReproposalTimeoutTicks = envutil.EnvOrDefaultInt(
"COCKROACH_RAFT_REPROPOSAL_TIMEOUT_TICKS", 6)
// defaultRaftEnableCheckQuorum specifies whether to enable CheckQuorum in
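The switch from `EnvOrDefaultInt64` to `EnvOrDefaultInt` only changes the Go type of the parsed value; the lookup pattern stays the same. A minimal sketch of that pattern, assuming `strconv`-based parsing (the real `envutil` helper additionally tracks which variables were read and how they were handled):

```go
// Hypothetical sketch of the envutil.EnvOrDefaultInt pattern; not the
// actual envutil implementation.
package main

import (
	"fmt"
	"os"
	"strconv"
)

func envOrDefaultInt(name string, def int) int {
	if s, ok := os.LookupEnv(name); ok {
		if v, err := strconv.Atoi(s); err == nil {
			return v
		}
		// A real implementation would likely reject a malformed value
		// rather than silently falling back, as this sketch does.
	}
	return def
}

func main() {
	fmt.Println(envOrDefaultInt("COCKROACH_RAFT_ELECTION_TIMEOUT_TICKS", 4))
}
```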
@@ -524,15 +539,15 @@ type RaftConfig struct {
// an election. The actual election timeout is randomized by each replica to
// between 1-2 election timeouts. This value is inherited by individual stores
// unless overridden.
- RaftElectionTimeoutTicks int64
+ RaftElectionTimeoutTicks int
// RaftReproposalTimeoutTicks is the number of ticks before reproposing a Raft
// command. This also specifies the number of ticks between each reproposal
// check, so the actual timeout is 1-2 times this value.
- RaftReproposalTimeoutTicks int64
+ RaftReproposalTimeoutTicks int
// RaftHeartbeatIntervalTicks is the number of ticks that pass between heartbeats.
- RaftHeartbeatIntervalTicks int64
+ RaftHeartbeatIntervalTicks int
// RangeLeaseDuration specifies the range lease duration.
RangeLeaseDuration time.Duration
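The latency tables in the comment above are plain interval sums, so they can be spot-checked mechanically. A throwaway sketch that reproduces the new leader-lease totals from the per-row bounds (numbers copied from the comment, not derived from the production code path):

```go
// Spot-check of the "Leader lease acquisition" rows from the comment above;
// the totals should come out to [3.04s - 9.70s].
package main

import "fmt"

func main() {
	rows := [][2]float64{
		{-3.00, 0.00}, // Store Liveness heartbeat offset
		{6.00, 6.00},  // Store Liveness expiration
		{0.00, 0.10},  // Store Liveness withdrawal
		{0.00, 2.00},  // Raft election timeout jitter
		{0.03, 1.20},  // Election (3x RTT)
		{0.01, 0.40},  // Lease acquisition (1x RTT)
	}
	var lo, hi float64
	for _, r := range rows {
		lo += r[0]
		hi += r[1]
	}
	fmt.Printf("Total latency [%.2fs - %.2fs]\n", lo, hi) // [3.04s - 9.70s]
}
```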
diff --git a/pkg/base/config_test.go b/pkg/base/config_test.go
index 39271e329ee8..53163b66e638 100644
--- a/pkg/base/config_test.go
+++ b/pkg/base/config_test.go
@@ -31,6 +31,7 @@ func TestDefaultRaftConfig(t *testing.T) {
// Assert the config and various derived values.
leaseActive, leaseRenewal := cfg.RangeLeaseDurations()
nodeActive, nodeRenewal := cfg.NodeLivenessDurations()
+ storeActive, storeRenewal := cfg.StoreLivenessDurations()
raftElectionTimeout := cfg.RaftElectionTimeout()
raftReproposalTimeout := cfg.RaftTickInterval * time.Duration(cfg.RaftReproposalTimeoutTicks)
raftHeartbeatInterval := cfg.RaftTickInterval * time.Duration(cfg.RaftHeartbeatIntervalTicks)
@@ -44,6 +45,7 @@ func TestDefaultRaftConfig(t *testing.T) {
s += fmt.Sprintf("RangeLeaseDurations: active=%s renewal=%s\n", leaseActive, leaseRenewal)
s += fmt.Sprintf("RangeLeaseAcquireTimeout: %s\n", cfg.RangeLeaseAcquireTimeout())
s += fmt.Sprintf("NodeLivenessDurations: active=%s renewal=%s\n", nodeActive, nodeRenewal)
+ s += fmt.Sprintf("StoreLivenessDurations: active=%s renewal=%s\n", storeActive, storeRenewal)
s += fmt.Sprintf("SentinelGossipTTL: %s\n", cfg.SentinelGossipTTL())
echotest.Require(t, s, datapathutils.TestDataPath(t, "raft_config"))
}
@@ -53,6 +55,9 @@ func TestDefaultRaftConfig(t *testing.T) {
minRTT = 10 * time.Millisecond
maxRTT = 400 * time.Millisecond // max GCP inter-region RTT is ~350ms
maxElectionMultiplier = 2
+ // TODO(nvanbenschoten): don't hardcode these values separately from the
+ // hardcoded value in storeliveness/config.go.
+ storeLivenessWithdrawalInterval = 100 * time.Millisecond
)
type interval struct {
@@ -74,14 +79,14 @@ func TestDefaultRaftConfig(t *testing.T) {
s := "// " + name + ":\n"
for _, ival := range formatted {
- s += fmt.Sprintf("// %-46s [%5.2fs -%5.2fs]\n",
+ s += fmt.Sprintf("// %-58s [%5.2fs -%5.2fs]\n",
ival.name, ival.min.Seconds(), ival.max.Seconds())
}
return s
}
var s string
- s += formatIntervals("Raft election", []interval{
+ s += formatIntervals("Raft election (fortification disabled)", []interval{
{
"Heartbeat offset (0-1 heartbeat interval)",
-raftHeartbeatInterval,
@@ -99,7 +104,7 @@ func TestDefaultRaftConfig(t *testing.T) {
},
})
s += "//\n"
- s += formatIntervals("Lease acquisition", []interval{
+ s += formatIntervals("Expiration lease acquisition", []interval{
{
"Heartbeat offset (0-1 heartbeat interval)",
-leaseRenewal,
@@ -111,7 +116,27 @@ func TestDefaultRaftConfig(t *testing.T) {
leaseActive,
},
{
- "Liveness epoch bump (2x RTT: CPut + append)",
+ "Lease acquisition (1x RTT: append)",
+ minRTT,
+ maxRTT,
+ },
+ })
+ s += "//\n"
+ s += formatIntervals("Epoch lease acquisition", []interval{
+ {
+ "Node Liveness heartbeat offset (0-1 heartbeat interval)",
+ -nodeRenewal,
+ 0,
+ },
+ {
+ "Node Liveness record expiration (constant)",
+ nodeActive,
+ nodeActive,
+ },
+ // NOTE: this test misses the fact that with quiescence, the raft election
+ // may not be triggered until this point.
+ {
+ "Node Liveness epoch bump (2x RTT: CPut + append)",
2 * minRTT,
2 * maxRTT,
},
@@ -121,6 +146,39 @@ func TestDefaultRaftConfig(t *testing.T) {
maxRTT,
},
})
+ s += "//\n"
+ s += formatIntervals("Leader lease acquisition (including raft election)", []interval{
+ {
+ "Store Liveness heartbeat offset (0-1 heartbeat interval)",
+ -storeRenewal,
+ 0,
+ },
+ {
+ "Store Liveness expiration (constant)",
+ storeActive,
+ storeActive,
+ },
+ {
+ "Store Liveness withdrawal (0-1 withdrawal interval)",
+ 0,
+ storeLivenessWithdrawalInterval,
+ },
+ {
+ fmt.Sprintf("Raft election timeout jitter (random 0x-%dx timeout)", maxElectionMultiplier-1),
+ 0,
+ (maxElectionMultiplier - 1) * raftElectionTimeout,
+ },
+ {
+ "Election (3x RTT: prevote, vote, append)",
+ 3 * minRTT,
+ 3 * maxRTT,
+ },
+ {
+ "Lease acquisition (1x RTT: append)",
+ minRTT,
+ maxRTT,
+ },
+ })
echotest.Require(t, s, datapathutils.TestDataPath(t, "raft_config_recovery"))
}
diff --git a/pkg/base/testdata/raft_config b/pkg/base/testdata/raft_config
index 7f51b022b738..88ee888091f8 100644
--- a/pkg/base/testdata/raft_config
+++ b/pkg/base/testdata/raft_config
@@ -2,9 +2,9 @@ echo
----
(base.RaftConfig) {
RaftTickInterval: (time.Duration) 500ms,
- RaftElectionTimeoutTicks: (int64) 4,
- RaftReproposalTimeoutTicks: (int64) 6,
- RaftHeartbeatIntervalTicks: (int64) 2,
+ RaftElectionTimeoutTicks: (int) 4,
+ RaftReproposalTimeoutTicks: (int) 6,
+ RaftHeartbeatIntervalTicks: (int) 2,
RangeLeaseDuration: (time.Duration) 6s,
RangeLeaseRenewalFraction: (float64) 0.5,
RaftEnableCheckQuorum: (bool) true,
@@ -23,4 +23,5 @@ RaftReproposalTimeout: 3s
RangeLeaseDurations: active=6s renewal=3s
RangeLeaseAcquireTimeout: 4s
NodeLivenessDurations: active=6s renewal=3s
+StoreLivenessDurations: active=6s renewal=3s
SentinelGossipTTL: 6s
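The derived durations in this golden file follow directly from the tick counts above with the 500ms tick interval. A quick arithmetic check (illustrative only):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	const (
		tick            = 500 * time.Millisecond
		electionTicks   = 4
		heartbeatTicks  = 2
		reproposalTicks = 6
	)
	fmt.Println(tick * electionTicks)   // 2s, randomized to 1x-2x => 2s-4s elections
	fmt.Println(tick * heartbeatTicks)  // 1s heartbeat interval
	fmt.Println(tick * reproposalTicks) // 3s, the RaftReproposalTimeout above
}
```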
diff --git a/pkg/base/testdata/raft_config_recovery b/pkg/base/testdata/raft_config_recovery
index 124bc2c5a096..e7f8d5968b1a 100644
--- a/pkg/base/testdata/raft_config_recovery
+++ b/pkg/base/testdata/raft_config_recovery
@@ -3,15 +3,30 @@
# reasoning should be adjusted.
echo
----
-// Raft election:
-// - Heartbeat offset (0-1 heartbeat interval) [-1.00s - 0.00s]
-// - Election timeout (random 1x-2x timeout) [ 2.00s - 4.00s]
-// - Election (3x RTT: prevote, vote, append) [ 0.03s - 1.20s]
-// Total latency [ 1.03s - 5.20s]
+// Raft election (fortification disabled):
+// - Heartbeat offset (0-1 heartbeat interval) [-1.00s - 0.00s]
+// - Election timeout (random 1x-2x timeout) [ 2.00s - 4.00s]
+// - Election (3x RTT: prevote, vote, append) [ 0.03s - 1.20s]
+// Total latency [ 1.03s - 5.20s]
//
-// Lease acquisition:
-// - Heartbeat offset (0-1 heartbeat interval) [-3.00s - 0.00s]
-// - Lease expiration (constant) [ 6.00s - 6.00s]
-// - Liveness epoch bump (2x RTT: CPut + append) [ 0.02s - 0.80s]
-// - Lease acquisition (1x RTT: append) [ 0.01s - 0.40s]
-// Total latency [ 3.03s - 7.20s]
+// Expiration lease acquisition:
+// - Heartbeat offset (0-1 heartbeat interval) [-3.00s - 0.00s]
+// - Lease expiration (constant) [ 6.00s - 6.00s]
+// - Lease acquisition (1x RTT: append) [ 0.01s - 0.40s]
+// Total latency [ 3.01s - 6.40s]
+//
+// Epoch lease acquisition:
+// - Node Liveness heartbeat offset (0-1 heartbeat interval) [-3.00s - 0.00s]
+// - Node Liveness record expiration (constant) [ 6.00s - 6.00s]
+// - Node Liveness epoch bump (2x RTT: CPut + append) [ 0.02s - 0.80s]
+// - Lease acquisition (1x RTT: append) [ 0.01s - 0.40s]
+// Total latency [ 3.03s - 7.20s]
+//
+// Leader lease acquisition (including raft election):
+// - Store Liveness heartbeat offset (0-1 heartbeat interval) [-3.00s - 0.00s]
+// - Store Liveness expiration (constant) [ 6.00s - 6.00s]
+// - Store Liveness withdrawal (0-1 withdrawal interval) [ 0.00s - 0.10s]
+// - Raft election timeout jitter (random 0x-1x timeout) [ 0.00s - 2.00s]
+// - Election (3x RTT: prevote, vote, append) [ 0.03s - 1.20s]
+// - Lease acquisition (1x RTT: append) [ 0.01s - 0.40s]
+// Total latency [ 3.04s - 9.70s]
diff --git a/pkg/build/version.txt b/pkg/build/version.txt
index a6f0b11222a8..37bb29f0ec68 100644
--- a/pkg/build/version.txt
+++ b/pkg/build/version.txt
@@ -1 +1 @@
-v24.3.0-alpha.3
+v24.3.0-beta.3
diff --git a/pkg/ccl/backupccl/alter_backup_test.go b/pkg/ccl/backupccl/alter_backup_test.go
index 299c0fb15ecd..c9dacc1ca387 100644
--- a/pkg/ccl/backupccl/alter_backup_test.go
+++ b/pkg/ccl/backupccl/alter_backup_test.go
@@ -111,10 +111,10 @@ func TestAlterBackupRestore(t *testing.T) {
// Tests to see that show backup correctly recognizes the new encryption-info
// file when SHOW BACKUP is called on an altered encrypted backup.
t.Run("alter-backup-show-backup", func(t *testing.T) {
- query = fmt.Sprintf("SHOW BACKUP FROM LATEST IN %s WITH KMS = %s", userfile, newURI)
+ query = fmt.Sprintf("SHOW BACKUP LATEST IN %s WITH KMS = %s", userfile, newURI)
sqlDB.Exec(t, query)
- query = fmt.Sprintf("SHOW BACKUP FROM LATEST IN %s WITH KMS = %s", userfile, oldURI)
+ query = fmt.Sprintf("SHOW BACKUP LATEST IN %s WITH KMS = %s", userfile, oldURI)
sqlDB.Exec(t, query)
})
diff --git a/pkg/ccl/backupccl/backup_tenant_test.go b/pkg/ccl/backupccl/backup_tenant_test.go
index 99dbfa37bf87..6643e8e58b01 100644
--- a/pkg/ccl/backupccl/backup_tenant_test.go
+++ b/pkg/ccl/backupccl/backup_tenant_test.go
@@ -145,7 +145,7 @@ func TestBackupTenantImportingTable(t *testing.T) {
// tenant now has a fully ingested, paused import, so back them up.
const dst = "userfile:///t"
- if _, err := sqlDB.DB.ExecContext(ctx, `BACKUP TENANT 10 INTO $1`, dst); err != nil {
+ if _, err := sqlDB.DB.ExecContext(ctx, `BACKUP TENANT 10 TO $1`, dst); err != nil {
t.Fatal(err)
}
// Destroy the tenant, then restore it.
@@ -156,7 +156,7 @@ func TestBackupTenantImportingTable(t *testing.T) {
if _, err := sqlDB.DB.ExecContext(ctx, "DROP TENANT [10] IMMEDIATE"); err != nil {
t.Fatal(err)
}
- if _, err := sqlDB.DB.ExecContext(ctx, "RESTORE TENANT 10 FROM LATEST IN $1", dst); err != nil {
+ if _, err := sqlDB.DB.ExecContext(ctx, "RESTORE TENANT 10 FROM $1", dst); err != nil {
t.Fatal(err)
}
diff --git a/pkg/ccl/backupccl/backup_test.go b/pkg/ccl/backupccl/backup_test.go
index ef9cc27659ba..6d85a48b18d9 100644
--- a/pkg/ccl/backupccl/backup_test.go
+++ b/pkg/ccl/backupccl/backup_test.go
@@ -89,7 +89,6 @@ import (
"github.com/cockroachdb/cockroach/pkg/testutils/skip"
"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
"github.com/cockroachdb/cockroach/pkg/testutils/testcluster"
- "github.com/cockroachdb/cockroach/pkg/util"
"github.com/cockroachdb/cockroach/pkg/util/admission"
"github.com/cockroachdb/cockroach/pkg/util/ctxgroup"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
@@ -133,16 +132,15 @@ func TestBackupRestoreStatementResult(t *testing.T) {
defer cleanupFn()
if err := backuptestutils.VerifyBackupRestoreStatementResult(
- t, sqlDB, "BACKUP DATABASE data INTO $1", localFoo,
+ t, sqlDB, "BACKUP DATABASE data TO $1", localFoo,
); err != nil {
t.Fatal(err)
}
- backupPath := getFullBackupPaths(t, sqlDB, localFoo)[0]
// The GZipBackupManifest subtest is to verify that BackupManifest objects
// have been stored in the GZip compressed format.
t.Run("GZipBackupManifest", func(t *testing.T) {
backupDir := fmt.Sprintf("%s/foo", dir)
- backupManifestFile := backupDir + backupPath + "/" + backupbase.BackupManifestName
+ backupManifestFile := backupDir + "/" + backupbase.BackupManifestName
backupManifestBytes, err := os.ReadFile(backupManifestFile)
if err != nil {
t.Fatal(err)
@@ -153,7 +151,7 @@ func TestBackupRestoreStatementResult(t *testing.T) {
sqlDB.Exec(t, "CREATE DATABASE data2")
if err := backuptestutils.VerifyBackupRestoreStatementResult(
- t, sqlDB, "RESTORE TABLE data.* FROM LATEST IN $1 WITH OPTIONS (into_db='data2')", localFoo,
+ t, sqlDB, "RESTORE data.* FROM $1 WITH OPTIONS (into_db='data2')", localFoo,
); err != nil {
t.Fatal(err)
}
@@ -356,11 +354,11 @@ func TestBackupRestorePartitioned(t *testing.T) {
runBackupRestore := func(t *testing.T, sqlDB *sqlutils.SQLRunner, backupURIs []string) {
locationFmtString, locationURIArgs := uriFmtStringAndArgs(backupURIs, 0)
- backupQuery := fmt.Sprintf("BACKUP DATABASE data INTO %s", locationFmtString)
+ backupQuery := fmt.Sprintf("BACKUP DATABASE data TO %s", locationFmtString)
sqlDB.Exec(t, backupQuery, locationURIArgs...)
sqlDB.Exec(t, `DROP DATABASE data;`)
- restoreQuery := fmt.Sprintf("RESTORE DATABASE data FROM LATEST IN %s", locationFmtString)
+ restoreQuery := fmt.Sprintf("RESTORE DATABASE data FROM %s", locationFmtString)
sqlDB.Exec(t, restoreQuery, locationURIArgs...)
}
@@ -380,16 +378,12 @@ func TestBackupRestorePartitioned(t *testing.T) {
fmt.Sprintf("%s?COCKROACH_LOCALITY=%s", locations[2], url.QueryEscape("dc=dc2")),
}
runBackupRestore(t, sqlDB, backupURIs)
- backupPath := getFullBackupPaths(t, sqlDB, locations[0])[0]
- backupPaths := util.Map(locations, func(uri string) string {
- return uri + backupPath
- })
// Verify that at least one SST exists in each backup destination.
- requireHasSSTs(t, dirOf(backupPaths[0]), dirOf(backupPaths[1]), dirOf(backupPaths[2]))
+ requireHasSSTs(t, dirOf(locations[0]), dirOf(locations[1]), dirOf(locations[2]))
// Verify that all of the partition manifests are compressed.
- requireCompressedManifest(t, backupPaths...)
+ requireCompressedManifest(t, locations...)
})
// Test that we're selecting the most specific locality tier for a location.
@@ -410,15 +404,11 @@ func TestBackupRestorePartitioned(t *testing.T) {
}
runBackupRestore(t, sqlDB, backupURIs)
- backupPath := getFullBackupPaths(t, sqlDB, locations[0])[0]
- backupPaths := util.Map(locations, func(uri string) string {
- return uri + backupPath
- })
// All data should be covered by az=az1 or az=az2, so expect all the
// data on those locations.
- requireHasNoSSTs(t, dirOf(backupPaths[0]), dirOf(backupPaths[1]))
- requireHasSSTs(t, dirOf(backupPaths[2]), dirOf(backupPaths[3]))
+ requireHasNoSSTs(t, dirOf(locations[0]), dirOf(locations[1]))
+ requireHasSSTs(t, dirOf(locations[2]), dirOf(locations[3]))
})
t.Run("partition-by-several-keys", func(t *testing.T) {
@@ -439,7 +429,7 @@ func TestBackupRestorePartitioned(t *testing.T) {
// Specifying multiple tiers is not supported.
locationFmtString, locationURIArgs := uriFmtStringAndArgs(backupURIs, 0)
- backupQuery := fmt.Sprintf("BACKUP DATABASE data INTO %s", locationFmtString)
+ backupQuery := fmt.Sprintf("BACKUP DATABASE data TO %s", locationFmtString)
sqlDB.ExpectErr(t, `tier must be in the form "key=value" not "region=east,az=az1"`, backupQuery, locationURIArgs...)
})
}
@@ -716,11 +706,11 @@ func TestBackupAndRestoreJobDescription(t *testing.T) {
sqlDB.Exec(t, "BACKUP INTO ($1, $2, $3)", collections...)
sqlDB.Exec(t, "BACKUP INTO LATEST IN ($1, $2, $3)", collections...)
- sqlDB.Exec(t, "BACKUP INTO LATEST IN ($1, $2, $3) WITH incremental_location=($4, $5, $6)",
+ sqlDB.Exec(t, "BACKUP INTO LATEST IN ($1, $2, $3) WITH incremental_location = ($4, $5, $6)",
append(collections, incrementals...)...)
sqlDB.ExpectErr(t, "the incremental_location option must contain the same number of locality",
- "BACKUP INTO LATEST IN $4 WITH incremental_location=($1, $2, $3)",
+ "BACKUP INTO LATEST IN $4 WITH incremental_location = ($1, $2, $3)",
append(incrementals, collections[0])...)
sqlDB.ExpectErr(t, "A full backup cannot be written to \"/subdir\", a user defined subdirectory. To take a full backup, remove the subdirectory from the backup command",
@@ -762,7 +752,7 @@ func TestBackupAndRestoreJobDescription(t *testing.T) {
sqlDB.Exec(t, "DROP DATABASE data CASCADE")
sqlDB.Exec(t, "RESTORE DATABASE data FROM $7 IN ($1, $2, "+
- "$3) WITH incremental_location=($4, $5, $6)",
+ "$3) WITH incremental_location = ($4, $5, $6)",
append(collections, incrementals[0], incrementals[1], incrementals[2], full1)...)
// Test restoring from the AOST backup
@@ -1904,7 +1894,7 @@ func TestBackupRestoreUserDefinedSchemas(t *testing.T) {
sqlDB.Exec(t, `CREATE TABLE sc.t1 (a STRING);
`)
sqlDB.QueryRow(t, `SELECT cluster_logical_timestamp()`).Scan(&ts4)
- sqlDB.Exec(t, `BACKUP DATABASE d INTO 'nodelocal://1/rev-history-backup' WITH revision_history`)
+ sqlDB.Exec(t, `BACKUP DATABASE d TO 'nodelocal://1/rev-history-backup' WITH revision_history`)
sqlDB.Exec(t, `DROP TABLE sc.t1;`)
sqlDB.Exec(t, `DROP SCHEMA sc;
@@ -1914,42 +1904,42 @@ func TestBackupRestoreUserDefinedSchemas(t *testing.T) {
sqlDB.Exec(t, `CREATE SCHEMA sc;`)
sqlDB.Exec(t, `CREATE TABLE sc.t1 (a FLOAT);`)
sqlDB.QueryRow(t, `SELECT cluster_logical_timestamp()`).Scan(&ts6)
- sqlDB.Exec(t, `BACKUP DATABASE d INTO 'nodelocal://1/rev-history-backup' WITH revision_history`)
+ sqlDB.Exec(t, `BACKUP DATABASE d TO 'nodelocal://1/rev-history-backup' WITH revision_history`)
t.Run("ts1", func(t *testing.T) {
sqlDB.Exec(t, "DROP DATABASE d;")
- sqlDB.Exec(t, "RESTORE DATABASE d FROM LATEST IN 'nodelocal://1/rev-history-backup' AS OF SYSTEM TIME "+ts1)
+ sqlDB.Exec(t, "RESTORE DATABASE d FROM 'nodelocal://1/rev-history-backup' AS OF SYSTEM TIME "+ts1)
sqlDB.Exec(t, "INSERT INTO d.sc.t1 VALUES (1)")
sqlDB.Exec(t, "INSERT INTO d.sc2.t1 VALUES (true)")
sqlDB.Exec(t, "USE d; CREATE SCHEMA unused;")
})
t.Run("ts2", func(t *testing.T) {
sqlDB.Exec(t, "DROP DATABASE d;")
- sqlDB.Exec(t, "RESTORE DATABASE d FROM LATEST IN 'nodelocal://1/rev-history-backup' AS OF SYSTEM TIME "+ts2)
+ sqlDB.Exec(t, "RESTORE DATABASE d FROM 'nodelocal://1/rev-history-backup' AS OF SYSTEM TIME "+ts2)
sqlDB.Exec(t, "INSERT INTO d.sc3.t1 VALUES (1)")
sqlDB.Exec(t, "INSERT INTO d.sc.t1 VALUES (true)")
})
t.Run("ts3", func(t *testing.T) {
sqlDB.Exec(t, "DROP DATABASE d;")
- sqlDB.Exec(t, "RESTORE DATABASE d FROM LATEST IN 'nodelocal://1/rev-history-backup' AS OF SYSTEM TIME "+ts3)
+ sqlDB.Exec(t, "RESTORE DATABASE d FROM 'nodelocal://1/rev-history-backup' AS OF SYSTEM TIME "+ts3)
sqlDB.Exec(t, "USE d")
sqlDB.Exec(t, "CREATE SCHEMA sc")
sqlDB.Exec(t, "CREATE SCHEMA sc3;")
})
t.Run("ts4", func(t *testing.T) {
sqlDB.Exec(t, "DROP DATABASE d;")
- sqlDB.Exec(t, "RESTORE DATABASE d FROM LATEST IN 'nodelocal://1/rev-history-backup' AS OF SYSTEM TIME "+ts4)
+ sqlDB.Exec(t, "RESTORE DATABASE d FROM 'nodelocal://1/rev-history-backup' AS OF SYSTEM TIME "+ts4)
sqlDB.Exec(t, "INSERT INTO d.sc.t1 VALUES ('hello')")
})
t.Run("ts5", func(t *testing.T) {
sqlDB.Exec(t, "DROP DATABASE d;")
- sqlDB.Exec(t, "RESTORE DATABASE d FROM LATEST IN 'nodelocal://1/rev-history-backup' AS OF SYSTEM TIME "+ts5)
+ sqlDB.Exec(t, "RESTORE DATABASE d FROM 'nodelocal://1/rev-history-backup' AS OF SYSTEM TIME "+ts5)
sqlDB.Exec(t, "USE d")
sqlDB.Exec(t, "CREATE SCHEMA sc")
})
t.Run("ts6", func(t *testing.T) {
sqlDB.Exec(t, "DROP DATABASE d;")
- sqlDB.Exec(t, "RESTORE DATABASE d FROM LATEST IN 'nodelocal://1/rev-history-backup' AS OF SYSTEM TIME "+ts6)
+ sqlDB.Exec(t, "RESTORE DATABASE d FROM 'nodelocal://1/rev-history-backup' AS OF SYSTEM TIME "+ts6)
sqlDB.Exec(t, `INSERT INTO d.sc.t1 VALUES (123.123)`)
})
})
@@ -2001,11 +1991,11 @@ func TestBackupRestoreUserDefinedSchemas(t *testing.T) {
sqlDB.Exec(t, `CREATE TABLE sc.tb2 (x sc.typ1);`)
sqlDB.Exec(t, `INSERT INTO sc.tb2 VALUES ('hello');`)
// Backup the database.
- sqlDB.Exec(t, `BACKUP DATABASE d INTO 'nodelocal://1/test/'`)
+ sqlDB.Exec(t, `BACKUP DATABASE d TO 'nodelocal://1/test/'`)
// Drop the database and restore into it.
sqlDB.Exec(t, `DROP DATABASE d`)
- sqlDB.Exec(t, `RESTORE DATABASE d FROM LATEST IN 'nodelocal://1/test/'`)
+ sqlDB.Exec(t, `RESTORE DATABASE d FROM 'nodelocal://1/test/'`)
// Check that we can resolve all names through the user defined schema.
sqlDB.CheckQueryResults(t, `SELECT * FROM d.sc.tb1`, [][]string{{"1"}})
@@ -2072,9 +2062,9 @@ func TestBackupRestoreUserDefinedSchemas(t *testing.T) {
expectedTablesInBackup: [][]string{{"data", "tb1"}, {"public", "bank"}, {"public", "table_in_data"}},
},
} {
- sqlDB.Exec(t, fmt.Sprintf(`BACKUP TABLE %s INTO 'nodelocal://1/%s'`, tc.target, tc.name))
+ sqlDB.Exec(t, fmt.Sprintf(`BACKUP TABLE %s TO 'nodelocal://1/%s'`, tc.target, tc.name))
sqlDB.Exec(t, `CREATE DATABASE restore`)
- sqlDB.Exec(t, fmt.Sprintf(`RESTORE TABLE %s FROM LATEST IN 'nodelocal://1/%s' WITH into_db='restore'`, tc.target, tc.name))
+ sqlDB.Exec(t, fmt.Sprintf(`RESTORE TABLE %s FROM 'nodelocal://1/%s' WITH into_db='restore'`, tc.target, tc.name))
sqlDB.CheckQueryResults(t, `SELECT schema_name,
table_name from [SHOW TABLES FROM restore] ORDER BY schema_name, table_name`, tc.expectedTablesInBackup)
sqlDB.Exec(t, `DROP DATABASE restore CASCADE`)
@@ -2400,7 +2390,7 @@ INSERT INTO d.t3 VALUES ('hi');
// Create a backup of all the tables in d.
{
// Backup all of the tables.
- sqlDB.Exec(t, `BACKUP TABLE d.* INTO 'nodelocal://1/test_all_tables/'`)
+ sqlDB.Exec(t, `BACKUP d.* INTO 'nodelocal://1/test_all_tables/'`)
// Create a new database to restore all of the tables into.
sqlDB.Exec(t, `CREATE DATABASE d4`)
// Restore all of the tables.
@@ -2749,7 +2739,7 @@ CREATE TYPE d.greeting AS ENUM ('hello', 'howdy', 'hi');
// Now create a backup while the type change job is blocked so that
// greeting is backed up with some enum members in READ_ONLY state.
- sqlDB.Exec(t, `BACKUP DATABASE d INTO 'nodelocal://1/test/'`)
+ sqlDB.Exec(t, `BACKUP DATABASE d TO 'nodelocal://1/test/'`)
// Let the type change finish.
close(waitForBackup)
@@ -2757,7 +2747,7 @@ CREATE TYPE d.greeting AS ENUM ('hello', 'howdy', 'hi');
// Now drop the database and restore.
sqlDB.Exec(t, `DROP DATABASE d`)
- restoreQuery := `RESTORE DATABASE d FROM LATEST IN 'nodelocal://1/test/'`
+ restoreQuery := `RESTORE DATABASE d FROM 'nodelocal://1/test/'`
if isSchemaOnly {
restoreQuery = restoreQuery + " with schema_only"
}
@@ -2880,7 +2870,7 @@ func TestBackupRestoreCrossTableReferences(t *testing.T) {
db := sqlutils.MakeSQLRunner(tc.Conns[0])
db.Exec(t, createStore)
- db.Exec(t, `RESTORE TABLE store.* FROM LATEST IN $1`, localFoo)
+ db.Exec(t, `RESTORE store.* FROM LATEST IN $1`, localFoo)
// Restore's Validate checks all the tables point to each other correctly.
db.CheckQueryResults(t, `SHOW CONSTRAINTS FROM store.customers`, origCustomers)
@@ -2917,7 +2907,7 @@ func TestBackupRestoreCrossTableReferences(t *testing.T) {
defer tc.Stopper().Stop(context.Background())
db := sqlutils.MakeSQLRunner(tc.Conns[0])
db.Exec(t, createStore)
- db.Exec(t, `RESTORE TABLE store.customers, store.orders FROM LATEST IN $1`, localFoo)
+ db.Exec(t, `RESTORE store.customers, store.orders FROM LATEST IN $1`, localFoo)
// Restore's Validate checks all the tables point to each other correctly.
// FK validation on customers from orders is preserved.
@@ -2939,10 +2929,10 @@ func TestBackupRestoreCrossTableReferences(t *testing.T) {
// FK validation of self-FK is preserved.
db.ExpectErr(
t, "cannot restore table \"orders\" without referenced table .* \\(or \"skip_missing_foreign_keys\" option\\)",
- `RESTORE TABLE store.orders FROM LATEST IN $1`, localFoo,
+ `RESTORE store.orders FROM LATEST IN $1`, localFoo,
)
- db.Exec(t, `RESTORE TABLE store.orders FROM LATEST IN $1 WITH OPTIONS (skip_missing_foreign_keys)`, localFoo)
+ db.Exec(t, `RESTORE store.orders FROM LATEST IN $1 WITH OPTIONS (skip_missing_foreign_keys)`, localFoo)
// Restore's Validate checks all the tables point to each other correctly.
// FK validation is gone.
@@ -2955,7 +2945,7 @@ func TestBackupRestoreCrossTableReferences(t *testing.T) {
defer tc.Stopper().Stop(context.Background())
db := sqlutils.MakeSQLRunner(tc.Conns[0])
db.Exec(t, createStore)
- db.Exec(t, `RESTORE TABLE store.receipts FROM LATEST IN $1 WITH OPTIONS (skip_missing_foreign_keys)`, localFoo)
+ db.Exec(t, `RESTORE store.receipts FROM LATEST IN $1 WITH OPTIONS (skip_missing_foreign_keys)`, localFoo)
// Restore's Validate checks all the tables point to each other correctly.
// FK validation of orders and customer is gone.
@@ -2973,7 +2963,7 @@ func TestBackupRestoreCrossTableReferences(t *testing.T) {
defer tc.Stopper().Stop(context.Background())
db := sqlutils.MakeSQLRunner(tc.Conns[0])
db.Exec(t, createStore)
- db.Exec(t, `RESTORE TABLE store.receipts, store.customers FROM LATEST IN $1 WITH OPTIONS (skip_missing_foreign_keys)`, localFoo)
+ db.Exec(t, `RESTORE store.receipts, store.customers FROM LATEST IN $1 WITH OPTIONS (skip_missing_foreign_keys)`, localFoo)
// Restore's Validate checks all the tables point to each other correctly.
// FK validation of orders is gone.
@@ -3005,9 +2995,9 @@ func TestBackupRestoreCrossTableReferences(t *testing.T) {
db.Exec(t, createStore)
db.ExpectErr(
t, `cannot restore view "early_customers" without restoring referenced table`,
- `RESTORE TABLE store.early_customers FROM LATEST IN $1`, localFoo,
+ `RESTORE store.early_customers FROM LATEST IN $1`, localFoo,
)
- db.Exec(t, `RESTORE TABLE store.early_customers, store.customers, store.orders FROM LATEST IN $1`, localFoo)
+ db.Exec(t, `RESTORE store.early_customers, store.customers, store.orders FROM LATEST IN $1`, localFoo)
db.CheckQueryResults(t, `SELECT * FROM store.early_customers`, origEarlyCustomers)
// nothing depends on orders so it can be dropped.
@@ -3044,10 +3034,10 @@ func TestBackupRestoreCrossTableReferences(t *testing.T) {
db.ExpectErr(
t, `cannot restore view "ordercounts" without restoring referenced table`,
- `RESTORE TABLE storestats.ordercounts, store.customers FROM LATEST IN $1`, localFoo,
+ `RESTORE storestats.ordercounts, store.customers FROM LATEST IN $1`, localFoo,
)
- db.Exec(t, `RESTORE TABLE store.customers, storestats.ordercounts, store.orders FROM LATEST IN $1`, localFoo)
+ db.Exec(t, `RESTORE store.customers, storestats.ordercounts, store.orders FROM LATEST IN $1`, localFoo)
// we want to observe just the view-related errors, not fk errors below.
db.Exec(t, `ALTER TABLE store.orders DROP CONSTRAINT orders_customerid_fkey`)
@@ -3071,7 +3061,7 @@ func TestBackupRestoreCrossTableReferences(t *testing.T) {
db.CheckQueryResults(t, `SELECT * FROM storestats.ordercounts ORDER BY id`, origOrderCounts)
db.Exec(t, `CREATE DATABASE otherstore`)
- db.Exec(t, `RESTORE TABLE store.* FROM LATEST IN $1 WITH into_db = 'otherstore'`, localFoo)
+ db.Exec(t, `RESTORE store.* FROM LATEST IN $1 WITH into_db = 'otherstore'`, localFoo)
// we want to observe just the view-related errors, not fk errors below.
db.Exec(t, `ALTER TABLE otherstore.orders DROP CONSTRAINT orders_customerid_fkey`)
db.Exec(t, `DROP TABLE otherstore.receipts`)
@@ -3096,13 +3086,13 @@ func TestBackupRestoreCrossTableReferences(t *testing.T) {
// Test cases where, after filtering out views that can't be restored, there are no other tables to restore
db.Exec(t, `RESTORE DATABASE storestats from latest in $1 WITH OPTIONS (skip_missing_views)`, localFoo)
- db.Exec(t, `RESTORE TABLE storestats.ordercounts from latest in $1 WITH OPTIONS (skip_missing_views)`, localFoo)
+ db.Exec(t, `RESTORE storestats.ordercounts from latest in $1 WITH OPTIONS (skip_missing_views)`, localFoo)
// Ensure that the views were not restored since they are missing the tables they reference.
db.CheckQueryResults(t, `USE storestats; SHOW TABLES;`, [][]string{})
// Need to specify into_db otherwise the restore gives error:
// a database named "store" needs to exist to restore schema "public".
- db.Exec(t, `RESTORE TABLE store.early_customers, store.referencing_early_customers from latest in $1 WITH OPTIONS (skip_missing_views, into_db='storestats')`, localFoo)
+ db.Exec(t, `RESTORE store.early_customers, store.referencing_early_customers from latest in $1 WITH OPTIONS (skip_missing_views, into_db='storestats')`, localFoo)
// Ensure that the views were not restored since they are missing the tables they reference.
db.CheckQueryResults(t, `SHOW TABLES;`, [][]string{})
@@ -3123,7 +3113,7 @@ func TestBackupRestoreCrossTableReferences(t *testing.T) {
// db.Exec(t, createStore)
// storestats.ordercounts depends also on store.orders, so it can't be restored
db.Exec(t, `CREATE DATABASE store2`)
- db.Exec(t, `RESTORE TABLE storestats.ordercounts, store.customers from latest in $1 WITH OPTIONS (skip_missing_views, into_db='store2')`, localFoo)
+ db.Exec(t, `RESTORE storestats.ordercounts, store.customers from latest in $1 WITH OPTIONS (skip_missing_views, into_db='store2')`, localFoo)
db.CheckQueryResults(t, `SHOW CONSTRAINTS FROM store2.customers`, origCustomers)
db.ExpectErr(t, `relation "storestats.ordercounts" does not exist`, `SELECT * FROM storestats.ordercounts`)
})
@@ -3152,12 +3142,15 @@ func checksumBankPayload(t *testing.T, sqlDB *sqlutils.SQLRunner) uint32 {
func TestBackupRestoreIncremental(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
+ // TODO(kev-cao,msbutler): rewrite this test to stop using the wonky old
+ // subdirectory format and just create one chain off one full backup,
+ // restoring from each backup using the inc backup end times.
+ //
// NB: because of rounding problems, using the end time in the AOST query is
// not guaranteed to work. We need to fix this.
- skip.WithIssue(t, 86830, "restoring via AOST without revision_history is flaky")
const numAccounts = 10
- const numBackups = 5
+ const numBackups = 4
windowSize := int(numAccounts / 3)
tc, sqlDB, dir, cleanupFn := backupRestoreTestSetup(t, singleNode, 0, InitManualReplication)
@@ -3165,18 +3158,10 @@ func TestBackupRestoreIncremental(t *testing.T) {
args := base.TestServerArgs{ExternalIODir: dir}
rng, _ := randutil.NewTestRand()
- backupDir := "nodelocal://1/backup"
- backupEndTimes := make([]string, 0, numBackups)
+ var backupDirs []string
var checksums []uint32
{
- getLatestBackupEndTime := func() string {
- var endTime string
- sqlDB.QueryRow(
- t, `SELECT end_time FROM [SHOW BACKUP FROM LATEST IN $1] ORDER BY end_time DESC LIMIT 1`, backupDir,
- ).Scan(&endTime)
- return endTime
- }
- for backupNum := 0; backupNum < numBackups-1; backupNum++ {
+ for backupNum := 0; backupNum < numBackups; backupNum++ {
// In the following, windowSize is `w` and offset is `o`. The first
// mutation creates accounts with id [w,3w). Every mutation after
// that deletes everything less than o, leaves [o, o+w) unchanged,
@@ -3197,12 +3182,14 @@ func TestBackupRestoreIncremental(t *testing.T) {
checksums = append(checksums, checksumBankPayload(t, sqlDB))
- if backupNum == 0 {
- sqlDB.Exec(t, `BACKUP TABLE data.bank INTO $1`, backupDir)
- } else {
- sqlDB.Exec(t, `BACKUP TABLE data.bank INTO LATEST IN $1`, backupDir)
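+ // This exercises the legacy incremental syntax: each incremental is
+ // written to its own location and must list the entire prior chain,
+ // oldest first, via INCREMENTAL FROM.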
+ backupDir := fmt.Sprintf("nodelocal://1/%d", backupNum)
+ var from string
+ if backupNum > 0 {
+ from = fmt.Sprintf(` INCREMENTAL FROM %s`, strings.Join(backupDirs, `,`))
}
- backupEndTimes = append(backupEndTimes, getLatestBackupEndTime())
+ sqlDB.Exec(t, fmt.Sprintf(`BACKUP TABLE data.bank TO '%s' %s`, backupDir, from))
+
+ backupDirs = append(backupDirs, fmt.Sprintf(`'%s'`, backupDir))
}
// Test a regression in RESTORE where the batch end key was not
@@ -3210,10 +3197,11 @@ func TestBackupRestoreIncremental(t *testing.T) {
// the greatest key in the diff is less than the previous backups.
sqlDB.Exec(t, `INSERT INTO data.bank VALUES (0, -1, 'final')`)
checksums = append(checksums, checksumBankPayload(t, sqlDB))
- sqlDB.Exec(t, `BACKUP TABLE data.bank INTO LATEST in $1`, backupDir)
- backupEndTimes = append(backupEndTimes, getLatestBackupEndTime())
+ sqlDB.Exec(t, fmt.Sprintf(`BACKUP TABLE data.bank TO 'nodelocal://1/final' INCREMENTAL FROM %s`,
+ strings.Join(backupDirs, `,`)))
+ backupDirs = append(backupDirs, `'nodelocal://1/final'`)
}
- require.Len(t, backupEndTimes, numBackups)
// Start a new cluster to restore into.
{
@@ -3231,23 +3219,105 @@ func TestBackupRestoreIncremental(t *testing.T) {
sqlDBRestore.ExpectErr(
t, fmt.Sprintf("belongs to cluster %s", tc.ApplicationLayer(0).RPCContext().LogicalClusterID.Get()),
- `BACKUP TABLE data.bank INTO LATEST IN $1`,
- backupDir,
+ `BACKUP TABLE data.bank TO $1 INCREMENTAL FROM $2`,
+ "nodelocal://1/some-other-table", "nodelocal://1/0",
)
- for i := 0; i < len(backupEndTimes); i++ {
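+ // Restore from progressively shorter prefixes of the chain, verifying
+ // each against the checksum recorded when the corresponding backup was
+ // taken.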
+ for i := len(backupDirs); i > 0; i-- {
sqlDBRestore.Exec(t, `DROP TABLE IF EXISTS data.bank`)
- sqlDBRestore.Exec(
+ from := strings.Join(backupDirs[:i], `,`)
+ sqlDBRestore.Exec(t, fmt.Sprintf(`RESTORE data.bank FROM %s`, from))
+
+ checksum := checksumBankPayload(t, sqlDBRestore)
+ if checksum != checksums[i-1] {
+ t.Fatalf("checksum mismatch at index %d: got %d expected %d",
+ i-1, checksum, checksums[i-1])
+ }
+ }
+ }
+}
+
+func TestBackupRestorePartitionedIncremental(t *testing.T) {
+ defer leaktest.AfterTest(t)()
+ defer log.Scope(t).Close(t)
+
+ const numAccounts = 10
+ const numBackups = 4
+ windowSize := int(numAccounts / 3)
+
+ _, sqlDB, dir, cleanupFn := backupRestoreTestSetup(t, multiNode, 0, InitManualReplication)
+ defer cleanupFn()
+ args := base.TestServerArgs{ExternalIODir: dir}
+ rng, _ := randutil.NewTestRand()
+
+ // Each backup in the chain is written to two locality-specific
+ // subdirectories, one under defaultDir and one under dc1Dir.
+ const defaultDir = "nodelocal://1/default"
+ const dc1Dir = "nodelocal://1/dc=dc1"
+ var defaultBackupDirs []string
+ var checksums []uint32
+ {
+ for backupNum := 0; backupNum < numBackups; backupNum++ {
+ // In the following, windowSize is `w` and offset is `o`. The first
+ // mutation creates accounts with id [w,3w). Every mutation after
+ // that deletes everything less than o, leaves [o, o+w) unchanged,
+ // mutates [o+w,o+2w), and inserts [o+2w,o+3w).
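+ // For example, with numAccounts=10 (so w=3), backupNum=1 uses o=3: it
+ // deletes ids <3, leaves [3,6) unchanged, rewrites [6,9), and inserts
+ // [9,12).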
+ offset := windowSize * backupNum
+ var buf bytes.Buffer
+ fmt.Fprintf(&buf, `DELETE FROM data.bank WHERE id < %d; `, offset)
+ buf.WriteString(`UPSERT INTO data.bank VALUES `)
+ for j := 0; j < windowSize*2; j++ {
+ if j != 0 {
+ buf.WriteRune(',')
+ }
+ id := offset + windowSize + j
+ payload := randutil.RandBytes(rng, backupRestoreRowPayloadSize)
+ fmt.Fprintf(&buf, `(%d, %d, '%s')`, id, backupNum, payload)
+ }
+ sqlDB.Exec(t, buf.String())
+
+ checksums = append(checksums, checksumBankPayload(t, sqlDB))
+
+ defaultBackupDir := fmt.Sprintf("%s/%d", defaultDir, backupNum)
+ dc1BackupDir := fmt.Sprintf("%s/%d", dc1Dir, backupNum)
+ var from string
+ if backupNum > 0 {
+ from = fmt.Sprintf(` INCREMENTAL FROM %s`, strings.Join(defaultBackupDirs, `,`))
+ }
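+ // The COCKROACH_LOCALITY query parameter partitions the backup: each
+ // node writes its data to the URI whose locality filter it matches,
+ // with "default" catching nodes that match no other partition.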
+ sqlDB.Exec(
t,
- `RESTORE TABLE data.bank FROM LATEST IN $1 AS OF SYSTEM TIME $2::STRING`,
- backupDir,
- backupEndTimes[i],
+ fmt.Sprintf(`BACKUP TABLE data.bank TO ('%s?COCKROACH_LOCALITY=%s', '%s?COCKROACH_LOCALITY=%s') %s`,
+ defaultBackupDir, url.QueryEscape("default"),
+ dc1BackupDir, url.QueryEscape("dc=dc1"),
+ from),
)
+ defaultBackupDirs = append(defaultBackupDirs, fmt.Sprintf(`'%s'`, defaultBackupDir))
+ }
+ }
+
+ // Start a new cluster to restore into.
+ {
+ restoreTC := testcluster.StartTestCluster(t, singleNode, base.TestClusterArgs{ServerArgs: args})
+ defer restoreTC.Stopper().Stop(context.Background())
+ sqlDBRestore := sqlutils.MakeSQLRunner(restoreTC.Conns[0])
+
+ sqlDBRestore.Exec(t, `CREATE DATABASE data`)
+ for i := len(defaultBackupDirs); i > 0; i-- {
+ sqlDBRestore.Exec(t, `DROP TABLE IF EXISTS data.bank`)
+ var from strings.Builder
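+ // Each layer of the chain must list all of its locality partitions
+ // together as a tuple, e.g. ('default/0', 'dc=dc1/0').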
+ for backupNum := range defaultBackupDirs[:i] {
+ if backupNum > 0 {
+ from.WriteString(", ")
+ }
+ from.WriteString(fmt.Sprintf("('%s/%d', '%s/%d')", defaultDir, backupNum, dc1Dir, backupNum))
+ }
+ sqlDBRestore.Exec(t, fmt.Sprintf(`RESTORE data.bank FROM %s`, from.String()))
+
checksum := checksumBankPayload(t, sqlDBRestore)
- if checksum != checksums[i] {
+ if checksum != checksums[i-1] {
t.Fatalf("checksum mismatch at index %d: got %d expected %d",
- i, checksum, checksums[i])
+ i-1, checksum, checksums[i-1])
}
}
}
@@ -3345,7 +3415,7 @@ func TestBackupRestoreWithConcurrentWrites(t *testing.T) {
// Drop the table and restore from backup and check our invariant.
atomic.StoreInt32(&allowErrors, 1)
sqlDB.Exec(t, `DROP TABLE data.bank`)
- sqlDB.Exec(t, `RESTORE TABLE data.* FROM LATEST IN $1`, localFoo)
+ sqlDB.Exec(t, `RESTORE data.* FROM LATEST IN $1`, localFoo)
atomic.StoreInt32(&allowErrors, 0)
bad := sqlDB.QueryStr(t, `SELECT id, balance, payload FROM data.bank WHERE id != balance`)
@@ -3375,14 +3445,14 @@ func TestConcurrentBackupRestores(t *testing.T) {
for j := 0; j < numIterations; j++ {
dbName := fmt.Sprintf("%s_%d", table, j)
backupDir := fmt.Sprintf("nodelocal://1/%s", dbName)
- backupQ := fmt.Sprintf(`BACKUP TABLE data.%s INTO $1`, table)
+ backupQ := fmt.Sprintf(`BACKUP data.%s INTO $1`, table)
if _, err := sqlDB.DB.ExecContext(gCtx, backupQ, backupDir); err != nil {
return err
}
if _, err := sqlDB.DB.ExecContext(gCtx, fmt.Sprintf(`CREATE DATABASE %s`, dbName)); err != nil {
return err
}
- restoreQ := fmt.Sprintf(`RESTORE TABLE data.%s FROM LATEST IN $1 WITH OPTIONS (into_db='%s')`, table, dbName)
+ restoreQ := fmt.Sprintf(`RESTORE data.%s FROM LATEST IN $1 WITH OPTIONS (into_db='%s')`, table, dbName)
if _, err := sqlDB.DB.ExecContext(gCtx, restoreQ, backupDir); err != nil {
return err
}
@@ -3421,7 +3491,7 @@ func TestBackupTenantsWithRevisionHistory(t *testing.T) {
const msg = "can not backup tenants with revision history"
- _, err = sqlDB.DB.ExecContext(ctx, `BACKUP TENANT 10 INTO 'nodelocal://1/foo' WITH revision_history`)
+ _, err = sqlDB.DB.ExecContext(ctx, `BACKUP TENANT 10 TO 'nodelocal://1/foo' WITH revision_history`)
require.Contains(t, fmt.Sprint(err), msg)
_, err = sqlDB.DB.ExecContext(ctx, `BACKUP INTO 'nodelocal://1/bar' WITH revision_history, include_all_virtual_clusters`)
@@ -3511,11 +3581,14 @@ func TestBackupAsOfSystemTime(t *testing.T) {
}
beforeDir := localFoo + `/beforeTs`
- sqlDB.Exec(t, fmt.Sprintf(`BACKUP DATABASE data INTO '%s' AS OF SYSTEM TIME %s`, beforeDir, beforeTs))
+ sqlDB.Exec(t, fmt.Sprintf(`BACKUP DATABASE data TO '%s' AS OF SYSTEM TIME %s`, beforeDir, beforeTs))
equalDir := localFoo + `/equalTs`
- sqlDB.Exec(t, fmt.Sprintf(`BACKUP DATABASE data INTO '%s' AS OF SYSTEM TIME %s`, equalDir, equalTs))
+ sqlDB.Exec(t, fmt.Sprintf(`BACKUP DATABASE data TO '%s' AS OF SYSTEM TIME %s`, equalDir, equalTs))
{
+ // Test UX guardrails for AS OF SYSTEM TIME backups in collections.
+ sqlDB.Exec(t, fmt.Sprintf(`BACKUP DATABASE data INTO '%s' AS OF SYSTEM TIME %s`, equalDir, equalTs))
+
sqlDB.ExpectErr(t, "`AS OF SYSTEM TIME` .* must be greater than the previous backup's end time of",
fmt.Sprintf(`BACKUP DATABASE data INTO LATEST IN '%s' AS OF SYSTEM TIME %s`,
equalDir,
@@ -3528,14 +3601,14 @@ func TestBackupAsOfSystemTime(t *testing.T) {
}
sqlDB.Exec(t, `DROP TABLE data.bank`)
- sqlDB.Exec(t, `RESTORE TABLE data.* FROM LATEST IN $1`, beforeDir)
+ sqlDB.Exec(t, `RESTORE data.* FROM $1`, beforeDir)
sqlDB.QueryRow(t, `SELECT count(*) FROM data.bank`).Scan(&rowCount)
if expected := numAccounts; rowCount != expected {
t.Fatalf("expected %d rows but found %d", expected, rowCount)
}
sqlDB.Exec(t, `DROP TABLE data.bank`)
- sqlDB.Exec(t, `RESTORE TABLE data.* FROM LATEST IN $1`, equalDir)
+ sqlDB.Exec(t, `RESTORE data.* FROM $1`, equalDir)
sqlDB.QueryRow(t, `SELECT count(*) FROM data.bank`).Scan(&rowCount)
if expected := numAccounts * 3 / 4; rowCount != expected {
t.Fatalf("expected %d rows but found %d", expected, rowCount)
@@ -3574,21 +3647,22 @@ func TestRestoreAsOfSystemTime(t *testing.T) {
t.Fatal(err)
}
- fullWithRevision, fullNoRevision := dir+"/full-revision", dir+"/full-no-revision"
- incWithRevision, incNoRevision := dir+"/inc-revision", dir+"/inc-no-revision"
+ fullBackup, latestBackup := dir+"/full", dir+"/latest"
+ incBackup, incLatestBackup := dir+"/inc", dir+"/inc-latest"
+ inc2Backup, inc2LatestBackup := incBackup+".2", incLatestBackup+".2"
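+ // The "latest" chain mirrors the revision_history chain but omits
+ // revision history, so it is only restorable at each backup's exact
+ // end time.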
sqlDB.Exec(t,
- fmt.Sprintf(`BACKUP DATABASE data INTO $1 AS OF SYSTEM TIME %s WITH revision_history`, ts[2]),
- fullWithRevision,
+ fmt.Sprintf(`BACKUP DATABASE data TO $1 AS OF SYSTEM TIME %s WITH revision_history`, ts[2]),
+ fullBackup,
)
sqlDB.Exec(t,
- fmt.Sprintf(`BACKUP DATABASE data INTO $1 AS OF SYSTEM TIME %s`, ts[2]),
- fullNoRevision,
+ fmt.Sprintf(`BACKUP DATABASE data TO $1 AS OF SYSTEM TIME %s`, ts[2]),
+ latestBackup,
)
fullTableBackup := dir + "/tbl"
sqlDB.Exec(t,
- fmt.Sprintf(`BACKUP TABLE data.bank INTO $1 AS OF SYSTEM TIME %s WITH revision_history`, ts[2]),
+ fmt.Sprintf(`BACKUP data.bank TO $1 AS OF SYSTEM TIME %s WITH revision_history`, ts[2]),
fullTableBackup,
)
@@ -3630,35 +3704,32 @@ func TestRestoreAsOfSystemTime(t *testing.T) {
sqlDB.QueryRow(t, `SELECT cluster_logical_timestamp()`).Scan(&ts[8])
sqlDB.Exec(t,
- fmt.Sprintf(`BACKUP DATABASE data INTO LATEST IN $1 AS OF SYSTEM TIME %s WITH revision_history, incremental_location=$2`, ts[5]),
- fullWithRevision, incWithRevision,
+ fmt.Sprintf(`BACKUP DATABASE data TO $1 AS OF SYSTEM TIME %s INCREMENTAL FROM $2 WITH revision_history`, ts[5]),
+ incBackup, fullBackup,
)
sqlDB.Exec(t,
- `BACKUP DATABASE data INTO LATEST IN $1 WITH revision_history, incremental_location=$2`,
- fullWithRevision, incWithRevision,
+ `BACKUP DATABASE data TO $1 INCREMENTAL FROM $2, $3 WITH revision_history`,
+ inc2Backup, fullBackup, incBackup,
)
sqlDB.Exec(t,
- fmt.Sprintf(`BACKUP DATABASE data INTO LATEST IN $1 AS OF SYSTEM TIME %s WITH incremental_location=$2`, ts[5]),
- fullNoRevision, incNoRevision,
+ fmt.Sprintf(`BACKUP DATABASE data TO $1 AS OF SYSTEM TIME %s INCREMENTAL FROM $2`, ts[5]),
+ incLatestBackup, latestBackup,
)
sqlDB.Exec(t,
- `BACKUP DATABASE data INTO LATEST IN $1 WITH incremental_location=$2`,
- fullNoRevision, incNoRevision,
+ `BACKUP DATABASE data TO $1 INCREMENTAL FROM $2, $3`,
+ inc2LatestBackup, latestBackup, incLatestBackup,
)
incTableBackup := dir + "/inctbl"
sqlDB.Exec(t,
- `BACKUP TABLE data.bank INTO LATEST IN $1 WITH revision_history, incremental_location=$2`,
- fullTableBackup, incTableBackup,
+ `BACKUP data.bank TO $1 INCREMENTAL FROM $2 WITH revision_history`,
+ incTableBackup, fullTableBackup,
)
var after string
sqlDB.QueryRow(t, `SELECT cluster_logical_timestamp()`).Scan(&after)
- // fullWithRevision and fullNoRevision backs up to t=2 (with and without revision history, respectively)
- // incWithRevision and incNoRevision each contain two incremental backups, one at t=5 and one at t>=8.
- // fullTableBackup and incTableBackup backs up to t=2 and t=8, respectively (with revision history)
for i, timestamp := range ts {
name := fmt.Sprintf("ts%d", i)
t.Run(name, func(t *testing.T) {
@@ -3672,16 +3743,16 @@ func TestRestoreAsOfSystemTime(t *testing.T) {
// time-travel.
sqlDB.Exec(t,
fmt.Sprintf(
- `RESTORE TABLE data.* FROM LATEST IN $1 AS OF SYSTEM TIME %s WITH incremental_location=$2, into_db='%s'`,
+ `RESTORE data.* FROM $1, $2, $3 AS OF SYSTEM TIME %s WITH into_db='%s'`,
timestamp, name,
),
- fullWithRevision, incWithRevision,
+ fullBackup, incBackup, inc2Backup,
)
// Similarly restore the since-table backup -- since full DB and single table
// backups sometimes behave differently.
sqlDB.Exec(t,
fmt.Sprintf(
- `RESTORE TABLE data.bank FROM LATEST IN $1 AS OF SYSTEM TIME %s WITH incremental_location=$2, into_db='%stbl'`,
+ `RESTORE data.bank FROM $1, $2 AS OF SYSTEM TIME %s WITH into_db='%stbl'`,
timestamp, name,
),
fullTableBackup, incTableBackup,
@@ -3713,27 +3784,27 @@ func TestRestoreAsOfSystemTime(t *testing.T) {
})
}
- t.Run("no revision history", func(t *testing.T) {
+ t.Run("latest", func(t *testing.T) {
sqlDB = sqlutils.MakeSQLRunner(sqlDB.DB)
- // The backups without revision history didn't specify ALL mvcc values, so we can't restore
+ // The "latest" backup didn't specify ALL mvcc values, so we can't restore
// to times in the middle.
sqlDB.Exec(t, `CREATE DATABASE err`)
- // fullWithRevision covers up to ts[2], incWithRevision to ts[5] and ts[8]
+ // fullBackup covers up to ts[2], inc to ts[5], inc2 to > ts[8].
sqlDB.ExpectErr(
t, "invalid RESTORE timestamp",
- fmt.Sprintf(`RESTORE TABLE data.* FROM LATEST IN $1 AS OF SYSTEM TIME %s WITH into_db='err'`, ts[3]),
- fullWithRevision,
+ fmt.Sprintf(`RESTORE data.* FROM $1 AS OF SYSTEM TIME %s WITH into_db='err'`, ts[3]),
+ fullBackup,
)
for _, i := range ts {
if i == ts[2] {
- // fullNoRevision is _at_ ts2 so that is the time, and the only time, at
+ // latestBackup is _at_ ts2 so that is the time, and the only time, at
// which restoring it is allowed.
sqlDB.Exec(
- t, fmt.Sprintf(`RESTORE TABLE data.* FROM LATEST IN $1 AS OF SYSTEM TIME %s WITH into_db='err'`, i),
- fullNoRevision,
+ t, fmt.Sprintf(`RESTORE data.* FROM $1 AS OF SYSTEM TIME %s WITH into_db='err'`, i),
+ latestBackup,
)
sqlDB.Exec(t, `DROP DATABASE err`)
sqlDB.Exec(t, `CREATE DATABASE err`)
@@ -3741,34 +3812,35 @@ func TestRestoreAsOfSystemTime(t *testing.T) {
} else {
sqlDB.ExpectErr(
t, "invalid RESTORE timestamp",
- fmt.Sprintf(`RESTORE TABLE data.* FROM LATEST IN $1 AS OF SYSTEM TIME %s WITH into_db='err'`, i),
- fullNoRevision,
+ fmt.Sprintf(`RESTORE data.* FROM $1 AS OF SYSTEM TIME %s WITH into_db='err'`, i),
+ latestBackup,
)
}
if i == ts[2] || i == ts[5] {
- // fullNoRevision is _at_ ts2 and the first incremental of incNoRevision is at ts5, so either of
- // those are valid for the chain. In fact there's a third time -- the second backup of incNoRevision,
- // that is valid as well but it isn't fixed when created above so we can't know it / test for it.
+ // latestBackup is _at_ ts2 and incLatestBackup is at ts5, so either of
+ // those are valid for the chain (latest,incLatest,inc2Latest). In fact
+ there's a third time -- that of inc2Latest -- which is valid as well, but
+ it isn't fixed when created above so we can't know it / test for it.
sqlDB.Exec(
- t, fmt.Sprintf(`RESTORE TABLE data.* FROM LATEST IN $1 AS OF SYSTEM TIME %s WITH incremental_location=$2, into_db='err'`, i),
- fullNoRevision, incNoRevision,
+ t, fmt.Sprintf(`RESTORE data.* FROM $1, $2, $3 AS OF SYSTEM TIME %s WITH into_db='err'`, i),
+ latestBackup, incLatestBackup, inc2LatestBackup,
)
sqlDB.Exec(t, `DROP DATABASE err`)
sqlDB.Exec(t, `CREATE DATABASE err`)
} else {
sqlDB.ExpectErr(
t, "invalid RESTORE timestamp",
- fmt.Sprintf(`RESTORE TABLE data.* FROM LATEST IN $1 AS OF SYSTEM TIME %s WITH incremental_location=$2, into_db='err'`, i),
- fullNoRevision, incNoRevision,
+ fmt.Sprintf(`RESTORE data.* FROM $1, $2, $3 AS OF SYSTEM TIME %s WITH into_db='err'`, i),
+ latestBackup, incLatestBackup, inc2LatestBackup,
)
}
}
sqlDB.ExpectErr(
t, "invalid RESTORE timestamp",
- fmt.Sprintf(`RESTORE TABLE data.* FROM LATEST IN $1 AS OF SYSTEM TIME %s WITH into_db='err'`, after),
- fullNoRevision,
+ fmt.Sprintf(`RESTORE data.* FROM $1 AS OF SYSTEM TIME %s WITH into_db='err'`, after),
+ latestBackup,
)
})
@@ -3785,7 +3857,7 @@ func TestRestoreAsOfSystemTime(t *testing.T) {
sqlDB.Exec(t, "DROP TABLE drop_table_db.a")
sqlDB.Exec(t, `BACKUP DATABASE drop_table_db INTO LATEST IN $1 WITH revision_history`, backupPath)
restoreQuery := fmt.Sprintf(
- "RESTORE TABLE drop_table_db.* FROM LATEST IN $1 AS OF SYSTEM TIME %s WITH into_db='drop_table_db_restore'", tsBefore)
+ "RESTORE drop_table_db.* FROM LATEST IN $1 AS OF SYSTEM TIME %s WITH into_db='drop_table_db_restore'", tsBefore)
sqlDB.Exec(t, restoreQuery, backupPath)
restoredTableQuery := "SELECT * FROM drop_table_db_restore.a"
@@ -3805,7 +3877,7 @@ func TestRestoreAsOfSystemTime(t *testing.T) {
sqlDB.Exec(t, "DROP TABLE create_and_drop.a")
sqlDB.Exec(t, `BACKUP DATABASE create_and_drop INTO LATEST IN $1 WITH revision_history`, backupPath)
restoreQuery := fmt.Sprintf(
- "RESTORE TABLE create_and_drop.* FROM LATEST IN $1 AS OF SYSTEM TIME %s WITH into_db='create_and_drop_restore'", tsBefore)
+ "RESTORE create_and_drop.* FROM LATEST IN $1 AS OF SYSTEM TIME %s WITH into_db='create_and_drop_restore'", tsBefore)
sqlDB.Exec(t, restoreQuery, backupPath)
restoredTableQuery := "SELECT * FROM create_and_drop_restore.a"
@@ -3821,18 +3893,18 @@ func TestRestoreAsOfSystemTime(t *testing.T) {
sqlDB.Exec(t, "CREATE TABLE ignore_dropped_table.a (k int, v string)")
sqlDB.Exec(t, "CREATE TABLE ignore_dropped_table.b (k int, v string)")
sqlDB.Exec(t, "DROP TABLE ignore_dropped_table.a")
- sqlDB.Exec(t, `BACKUP DATABASE ignore_dropped_table INTO $1 WITH revision_history`, backupPath)
+ sqlDB.Exec(t, `BACKUP DATABASE ignore_dropped_table TO $1 WITH revision_history`, backupPath)
// Make a backup without any changes to the schema. This ensures that table
// "a" is not included in the span for this incremental backup.
- sqlDB.Exec(t, `BACKUP DATABASE ignore_dropped_table INTO $1 WITH revision_history`, backupPath)
+ sqlDB.Exec(t, `BACKUP DATABASE ignore_dropped_table TO $1 WITH revision_history`, backupPath)
// Edit the schemas to back up to ensure there are revisions generated.
// Table a should not be considered part of the span of the next backup.
sqlDB.Exec(t, "CREATE TABLE ignore_dropped_table.c (k int, v string)")
- sqlDB.Exec(t, `BACKUP DATABASE ignore_dropped_table INTO $1 WITH revision_history`, backupPath)
+ sqlDB.Exec(t, `BACKUP DATABASE ignore_dropped_table TO $1 WITH revision_history`, backupPath)
// Ensure it can be restored.
sqlDB.Exec(t, "DROP DATABASE ignore_dropped_table")
- sqlDB.Exec(t, "RESTORE DATABASE ignore_dropped_table FROM LATEST IN $1", backupPath)
+ sqlDB.Exec(t, "RESTORE DATABASE ignore_dropped_table FROM $1", backupPath)
})
}
@@ -3861,7 +3933,7 @@ func TestEmptyBackupsInChain(t *testing.T) {
sqlDB.Exec(t, "DROP TABLE create_and_drop.a")
sqlDB.Exec(t, `BACKUP DATABASE create_and_drop INTO LATEST IN $1 WITH revision_history`, backupPath)
restoreQuery := fmt.Sprintf(
- "RESTORE TABLE create_and_drop.* FROM LATEST IN $1 AS OF SYSTEM TIME %s WITH into_db='create_and_drop_restore'", tsBefore)
+ "RESTORE create_and_drop.* FROM LATEST IN $1 AS OF SYSTEM TIME %s WITH into_db='create_and_drop_restore'", tsBefore)
sqlDB.Exec(t, restoreQuery, backupPath)
restoredTableQuery := "SELECT * FROM create_and_drop_restore.a"
@@ -3879,7 +3951,7 @@ func TestEmptyBackupsInChain(t *testing.T) {
sqlDB.QueryRow(t, "SELECT cluster_logical_timestamp()").Scan(&tsBefore)
sqlDB.Exec(t, `BACKUP DATABASE create_and_drop2 INTO LATEST IN $1 WITH revision_history`, backupPath)
restoreQuery := fmt.Sprintf(
- "RESTORE TABLE create_and_drop2.* FROM LATEST IN $1 AS OF SYSTEM TIME %s WITH into_db='create_and_drop_restore2'", tsBefore)
+ "RESTORE create_and_drop2.* FROM LATEST IN $1 AS OF SYSTEM TIME %s WITH into_db='create_and_drop_restore2'", tsBefore)
sqlDB.Exec(t, restoreQuery, backupPath)
sqlDB.CheckQueryResults(t, `SELECT table_name FROM [SHOW TABLES]`, [][]string{{"bank"}})
})
@@ -3915,15 +3987,15 @@ func TestRestoreAsOfSystemTimeGCBounds(t *testing.T) {
postGC := eval.TimestampToDecimalDatum(s.Clock().Now()).String()
lateFullTableBackup := dir + "/tbl-after-gc"
- sqlDB.Exec(t, `BACKUP TABLE data.bank INTO $1 WITH revision_history`, lateFullTableBackup)
+ sqlDB.Exec(t, `BACKUP data.bank INTO $1 WITH revision_history`, lateFullTableBackup)
sqlDB.Exec(t, `DROP TABLE data.bank`)
sqlDB.ExpectErr(
t, `BACKUP for requested time only has revision history from`,
- fmt.Sprintf(`RESTORE TABLE data.bank FROM LATEST IN $1 AS OF SYSTEM TIME %s`, preGC),
+ fmt.Sprintf(`RESTORE data.bank FROM LATEST IN $1 AS OF SYSTEM TIME %s`, preGC),
lateFullTableBackup,
)
sqlDB.Exec(
- t, fmt.Sprintf(`RESTORE TABLE data.bank FROM LATEST IN $1 AS OF SYSTEM TIME %s`, postGC), lateFullTableBackup,
+ t, fmt.Sprintf(`RESTORE data.bank FROM LATEST IN $1 AS OF SYSTEM TIME %s`, postGC), lateFullTableBackup,
)
t.Run("restore-pre-gc-aost", func(t *testing.T) {
@@ -3958,13 +4030,13 @@ func TestAsOfSystemTimeOnRestoredData(t *testing.T) {
const numAccounts = 10
_, sqlDB, _, cleanupFn := backupRestoreTestSetup(t, singleNode, numAccounts, InitManualReplication)
defer cleanupFn()
- sqlDB.Exec(t, `BACKUP TABLE data.* INTO $1`, localFoo)
+ sqlDB.Exec(t, `BACKUP data.* INTO $1`, localFoo)
sqlDB.Exec(t, `DROP TABLE data.bank`)
var beforeTs string
sqlDB.QueryRow(t, `SELECT cluster_logical_timestamp()`).Scan(&beforeTs)
- sqlDB.Exec(t, `RESTORE TABLE data.* FROM LATEST IN $1`, localFoo)
+ sqlDB.Exec(t, `RESTORE data.* FROM LATEST IN $1`, localFoo)
var afterTs string
sqlDB.QueryRow(t, `SELECT cluster_logical_timestamp()`).Scan(&afterTs)
@@ -3991,12 +4063,11 @@ func TestBackupRestoreChecksum(t *testing.T) {
defer cleanupFn()
dir = filepath.Join(dir, "foo")
- sqlDB.Exec(t, `BACKUP DATABASE data INTO $1`, localFoo)
- backupPath := getFullBackupPaths(t, sqlDB, localFoo)[0]
+ sqlDB.Exec(t, `BACKUP DATABASE data TO $1`, localFoo)
var backupManifest backuppb.BackupManifest
{
- backupManifestBytes, err := os.ReadFile(filepath.Join(dir, backupPath, backupbase.BackupManifestName))
+ backupManifestBytes, err := os.ReadFile(filepath.Join(dir, backupbase.BackupManifestName))
if err != nil {
t.Fatalf("%+v", err)
}
@@ -4013,7 +4084,7 @@ func TestBackupRestoreChecksum(t *testing.T) {
// Corrupt all of the files in the backup.
for i := range backupManifest.Files {
- f, err := os.OpenFile(filepath.Join(dir, backupPath, backupManifest.Files[i].Path), os.O_WRONLY, 0)
+ f, err := os.OpenFile(filepath.Join(dir, backupManifest.Files[i].Path), os.O_WRONLY, 0)
if err != nil {
t.Fatalf("%+v", err)
}
@@ -4031,106 +4102,10 @@ func TestBackupRestoreChecksum(t *testing.T) {
}
sqlDB.Exec(t, `DROP TABLE data.bank`)
- sqlDB.ExpectErr(t, "checksum mismatch", `RESTORE TABLE data.* FROM LATEST IN $1`, localFoo)
+ sqlDB.ExpectErr(t, "checksum mismatch", `RESTORE data.* FROM $1`, localFoo)
}
-// TestNonLinearChain observes the effect of a non-linear chain of backups, for
-// example if two inc backups run concurrently, where the second starts before
-// the first finishes and thus does not use the first's end time when picking a
-// start time. In such a chain this first backup is made redundant by the second
-// and should be ignored by restore rather than restored.
-func TestNonLinearChain(t *testing.T) {
- defer leaktest.AfterTest(t)()
- defer log.Scope(t).Close(t)
-
- dir, cleanup := testutils.TempDir(t)
- defer cleanup()
-
- tc := testcluster.NewTestCluster(t, 1, base.TestClusterArgs{ServerArgs: base.TestServerArgs{
- DefaultTestTenant: base.TODOTestTenantDisabled, ExternalIODir: dir, Knobs: base.TestingKnobs{
- JobsTestingKnobs: jobs.NewTestingKnobsWithShortIntervals(),
- },
- }})
-
- tc.Start(t)
- defer tc.Stopper().Stop(context.Background())
-
- sqlDB := sqlutils.MakeSQLRunner(tc.Conns[0])
-
- // Make a table with a row in it and make a full backup of it.
- sqlDB.Exec(t, `CREATE TABLE t (a INT PRIMARY KEY)`)
- sqlDB.Exec(t, `INSERT INTO t VALUES (0)`)
- sqlDB.Exec(t, `BACKUP TABLE defaultdb.t INTO $1`, localFoo)
- require.Len(t, sqlDB.QueryStr(t, `SELECT DISTINCT end_time FROM [SHOW BACKUP FROM LATEST IN $1]`, localFoo), 1)
-
- // Write a row and note the time that includes that row.
- var ts1, ts2 string
- sqlDB.Exec(t, `INSERT INTO t VALUES (1)`)
- sqlDB.QueryRow(t, `SELECT cluster_logical_timestamp()`).Scan(&ts1)
-
- // Start *but pause rather than finish* an inc backup to ts1 of our new row.
- var j jobspb.JobID
- sqlDB.Exec(t, `SET CLUSTER SETTING jobs.debug.pausepoints = 'backup.before.flow'`)
- sqlDB.QueryRow(t, fmt.Sprintf(`BACKUP TABLE defaultdb.t INTO LATEST IN $1 AS OF SYSTEM TIME %s WITH DETACHED`, ts1), localFoo).Scan(&j)
- jobutils.WaitForJobToPause(t, sqlDB, j)
- sqlDB.Exec(t, `RESET CLUSTER SETTING jobs.debug.pausepoints`)
-
- // Add another row and record the time that includes it.
- sqlDB.Exec(t, `INSERT INTO t VALUES (2)`)
- sqlDB.QueryRow(t, `SELECT cluster_logical_timestamp()`).Scan(&ts2)
-
- // Run -- and finish -- an inc backup to ts2. Since the first inc has not yet
- // finished, this will find the full as its parent and use its end, rather
- // than the paused inc, as its start time.
- sqlDB.Exec(t, fmt.Sprintf(`BACKUP TABLE defaultdb.t INTO LATEST IN $1 AS OF SYSTEM TIME %s`, ts2), localFoo)
-
- // We should see two end times now in the shown backup -- the full and this
- // (second) inc.
- require.Len(t, sqlDB.QueryStr(t, `SELECT DISTINCT end_time FROM [SHOW BACKUP FROM LATEST IN $1]`, localFoo), 2)
-
- // Now we have a full ending at t0, an incomplete inc from t0 to t1, and a
- // complete inc also from t0 but to t2. We will move `t` out of our way and
- // run a restore of the chain, i.e. to t2 to see what happens, noting how many
- // files we open to do so.
- sqlDB.Exec(t, `DROP TABLE t`)
- openedBefore := tc.Servers[0].MustGetSQLCounter("cloud.readers_opened")
- sqlDB.Exec(t, `RESTORE TABLE defaultdb.t FROM LATEST IN $1`, localFoo)
- sqlDB.CheckQueryResults(t, `SELECT * FROM t`, [][]string{{"0"}, {"1"}, {"2"}})
-
- // Note how many files the restore opened.
- openedA := tc.Servers[0].MustGetSQLCounter("cloud.readers_opened") - openedBefore
-
- // Now let's let the paused backup finish, adding a bonus "spur" to the chian.
- sqlDB.Exec(t, `RESUME JOB $1`, j)
- jobutils.WaitForJobToSucceed(t, sqlDB, j)
-
- // We should see three end times now in the shown backup -- the full, the 2nd
- // inc we saw before, but now also this first inc as well.
- require.Len(t, sqlDB.QueryStr(t, `SELECT DISTINCT end_time FROM [SHOW BACKUP FROM LATEST IN $1]`, localFoo), 3)
-
- // Restore the same thing -- t2 -- we did before but now with the extra inc
- // spur hanging out in the chain. This should produce the same result, and we
- // would like it to only open one extra file to do so -- the manifest that
- // includes the timestamps that then show it is not needed by the restore.
- sqlDB.Exec(t, `DROP TABLE t`)
- sqlDB.Exec(t, `RESTORE TABLE defaultdb.t FROM LATEST IN $1`, localFoo)
- sqlDB.CheckQueryResults(t, `SELECT * FROM t`, [][]string{{"0"}, {"1"}, {"2"}})
- openedB := tc.Servers[0].MustGetSQLCounter("cloud.readers_opened") - openedA - openedBefore
- // TODO(dt): enable this assertion once it holds.
- if false {
- require.Equal(t, openedA+1, openedB)
- } else {
- require.Less(t, openedA+1, openedB)
- }
-
- // Finally, make sure we can restore from the tip of the spur, not just the
- // tip of the chain.
- sqlDB.Exec(t, `DROP TABLE t`)
- sqlDB.Exec(t, fmt.Sprintf(`RESTORE TABLE defaultdb.t FROM LATEST IN $1 AS OF SYSTEM TIME %s`, ts1), localFoo)
- sqlDB.CheckQueryResults(t, `SELECT * FROM t`, [][]string{{"0"}, {"1"}})
-}
-
-func TestBackupRestoreMissingFulls(t *testing.T) {
+func TestTimestampMismatch(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const numAccounts = 1
@@ -4142,25 +4117,71 @@ func TestBackupRestoreMissingFulls(t *testing.T) {
sqlDB.Exec(t, `INSERT INTO data.t2 VALUES (1)`)
fullBackup := localFoo + "/0"
- incBackup := localFoo + "/1"
-
- sqlDB.Exec(t, `BACKUP DATABASE data INTO $1`, fullBackup)
- sqlDB.Exec(t, `BACKUP DATABASE data INTO LATEST IN $1 WITH incremental_location=$2`, fullBackup, incBackup)
+ incrementalT1FromFull := localFoo + "/1"
+ incrementalT2FromT1 := localFoo + "/2"
+ incrementalT3FromT1OneTable := localFoo + "/3"
+
+ sqlDB.Exec(t, `BACKUP DATABASE data TO $1`,
+ fullBackup)
+ sqlDB.Exec(t, `BACKUP DATABASE data TO $1 INCREMENTAL FROM $2`,
+ incrementalT1FromFull, fullBackup)
+ sqlDB.Exec(t, `BACKUP TABLE data.bank TO $1 INCREMENTAL FROM $2`,
+ incrementalT3FromT1OneTable, fullBackup)
+ sqlDB.Exec(t, `BACKUP DATABASE data TO $1 INCREMENTAL FROM $2, $3`,
+ incrementalT2FromT1, fullBackup, incrementalT1FromFull)
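+ // This builds two chains off the same full backup: full -> t1 -> t2
+ // covering all of data, and full -> t3 covering only data.bank.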
t.Run("Backup", func(t *testing.T) {
// Missing the initial full backup.
sqlDB.ExpectErr(
- t, "path does not contain a completed latest backup",
- `BACKUP DATABASE data INTO LATEST IN $1 WITH incremental_location=$2`,
- localFoo+"/missing-initial", incBackup,
+ t, "backups listed out of order",
+ `BACKUP DATABASE data TO $1 INCREMENTAL FROM $2`,
+ localFoo+"/missing-initial", incrementalT1FromFull,
+ )
+
+ // Missing an intermediate incremental backup.
+ sqlDB.ExpectErr(
+ t, "backups listed out of order",
+ `BACKUP DATABASE data TO $1 INCREMENTAL FROM $2, $3`,
+ localFoo+"/missing-incremental", fullBackup, incrementalT2FromT1,
+ )
+
+ // Backups specified out of order.
+ sqlDB.ExpectErr(
+ t, "out of order",
+ `BACKUP DATABASE data TO $1 INCREMENTAL FROM $2, $3`,
+ localFoo+"/ooo", incrementalT1FromFull, fullBackup,
+ )
+
+ // Missing data for one table in the most recent backup.
+ sqlDB.ExpectErr(
+ t, "previous backup does not contain table",
+ `BACKUP DATABASE data TO $1 INCREMENTAL FROM $2, $3`,
+ localFoo+"/missing-table-data", fullBackup, incrementalT3FromT1OneTable,
)
})
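+ // Drop the backed-up tables so the RESTORE cases below start from a
+ // clean slate.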
+ sqlDB.Exec(t, `DROP TABLE data.bank`)
+ sqlDB.Exec(t, `DROP TABLE data.t2`)
t.Run("Restore", func(t *testing.T) {
// Missing the initial full backup.
+ sqlDB.ExpectErr(t, "no backup covers time", `RESTORE data.* FROM $1`, incrementalT1FromFull)
+
+ // Missing an intermediate incremental backup.
sqlDB.ExpectErr(
- t, "path does not contain a completed latest backup", `RESTORE TABLE data.* FROM LATEST IN $1`,
- incBackup,
+ t, "no backup covers time",
+ `RESTORE data.* FROM $1, $2`, fullBackup, incrementalT2FromT1,
+ )
+
+ // Backups specified out of order.
+ sqlDB.ExpectErr(
+ t, "out of order",
+ `RESTORE data.* FROM $1, $2`, incrementalT1FromFull, fullBackup,
+ )
+
+ // Missing data for one table in the most recent backup.
+ sqlDB.ExpectErr(
+ t, "table \"data.t2\" does not exist",
+ `RESTORE data.bank, data.t2 FROM $1, $2`, fullBackup, incrementalT3FromT1OneTable,
)
})
}
@@ -4683,7 +4704,7 @@ func TestRestoredPrivileges(t *testing.T) {
withGrants := sqlDB.QueryStr(t, `SHOW GRANTS ON data.bank`)
- sqlDB.Exec(t, `BACKUP DATABASE data, data2 INTO $1`, localFoo)
+ sqlDB.Exec(t, `BACKUP DATABASE data, data2 TO $1`, localFoo)
sqlDB.Exec(t, `DROP TABLE data.bank`)
t.Run("into fresh db", func(t *testing.T) {
@@ -4691,7 +4712,7 @@ func TestRestoredPrivileges(t *testing.T) {
defer tc.Stopper().Stop(context.Background())
sqlDBRestore := sqlutils.MakeSQLRunner(tc.Conns[0])
sqlDBRestore.Exec(t, `CREATE DATABASE data`)
- sqlDBRestore.Exec(t, `RESTORE TABLE data.bank FROM LATEST IN $1`, localFoo)
+ sqlDBRestore.Exec(t, `RESTORE data.bank FROM $1`, localFoo)
sqlDBRestore.CheckQueryResults(t, `SHOW GRANTS ON data.bank`, rootOnly)
})
@@ -4703,7 +4724,7 @@ func TestRestoredPrivileges(t *testing.T) {
sqlDBRestore.Exec(t, `CREATE USER someone`)
sqlDBRestore.Exec(t, `USE data`)
sqlDBRestore.Exec(t, `ALTER DEFAULT PRIVILEGES GRANT SELECT, INSERT, UPDATE, DELETE ON TABLES TO someone`)
- sqlDBRestore.Exec(t, `RESTORE TABLE data.bank FROM LATEST IN $1`, localFoo)
+ sqlDBRestore.Exec(t, `RESTORE data.bank FROM $1`, localFoo)
sqlDBRestore.CheckQueryResults(t, `SHOW GRANTS ON data.bank`, withGrants)
})
@@ -4712,7 +4733,7 @@ func TestRestoredPrivileges(t *testing.T) {
defer tc.Stopper().Stop(context.Background())
sqlDBRestore := sqlutils.MakeSQLRunner(tc.Conns[0])
sqlDBRestore.Exec(t, `CREATE USER someone`)
- sqlDBRestore.Exec(t, `RESTORE DATABASE data2 FROM LATEST IN $1`, localFoo)
+ sqlDBRestore.Exec(t, `RESTORE DATABASE data2 FROM $1`, localFoo)
sqlDBRestore.CheckQueryResults(t, `SHOW GRANTS ON DATABASE data2`, data2Grants)
})
}
@@ -4725,9 +4746,9 @@ func TestRestoreInto(t *testing.T) {
_, sqlDB, _, cleanupFn := backupRestoreTestSetup(t, singleNode, numAccounts, InitManualReplication)
defer cleanupFn()
- sqlDB.Exec(t, `BACKUP DATABASE data INTO $1`, localFoo)
+ sqlDB.Exec(t, `BACKUP DATABASE data TO $1`, localFoo)
- restoreStmt := fmt.Sprintf(`RESTORE TABLE data.bank FROM LATEST IN '%s' WITH into_db = 'data 2'`, localFoo)
+ restoreStmt := fmt.Sprintf(`RESTORE data.bank FROM '%s' WITH into_db = 'data 2'`, localFoo)
sqlDB.ExpectErr(t, "a database named \"data 2\" needs to exist", restoreStmt)
@@ -4763,10 +4784,10 @@ func TestRestoreDatabaseVersusTable(t *testing.T) {
d4foobar := "nodelocal://1/d4foobar"
d4star := "nodelocal://1/d4star"
- origDB.Exec(t, `BACKUP DATABASE data, d2, d3, d4 INTO $1`, localFoo)
- origDB.Exec(t, `BACKUP TABLE d4.foo INTO $1`, d4foo)
- origDB.Exec(t, `BACKUP TABLE d4.foo, d4.bar INTO $1`, d4foobar)
- origDB.Exec(t, `BACKUP TABLE d4.* INTO $1`, d4star)
+ origDB.Exec(t, `BACKUP DATABASE data, d2, d3, d4 TO $1`, localFoo)
+ origDB.Exec(t, `BACKUP d4.foo TO $1`, d4foo)
+ origDB.Exec(t, `BACKUP d4.foo, d4.bar TO $1`, d4foobar)
+ origDB.Exec(t, `BACKUP d4.* TO $1`, d4star)
t.Run("incomplete-db", func(t *testing.T) {
tcRestore := testcluster.StartTestCluster(t, singleNode, base.TestClusterArgs{ServerArgs: args})
@@ -4777,37 +4798,37 @@ func TestRestoreDatabaseVersusTable(t *testing.T) {
sqlDB.ExpectErr(
t, "cannot RESTORE DATABASE from a backup of individual tables",
- `RESTORE DATABASE d4 FROM LATEST IN $1`, d4foo,
+ `RESTORE database d4 FROM $1`, d4foo,
)
sqlDB.ExpectErr(
t, "cannot RESTORE .* from a backup of individual tables",
- `RESTORE TABLE d4.* FROM LATEST IN $1 WITH into_db = 'd5'`, d4foo,
+ `RESTORE d4.* FROM $1 WITH into_db = 'd5'`, d4foo,
)
sqlDB.ExpectErr(
t, "cannot RESTORE DATABASE from a backup of individual tables",
- `RESTORE DATABASE d4 FROM LATEST IN $1`, d4foobar,
+ `RESTORE database d4 FROM $1`, d4foobar,
)
sqlDB.ExpectErr(
t, "cannot RESTORE .* from a backup of individual tables",
- `RESTORE TABLE d4.* FROM LATEST IN $1 WITH into_db = 'd5'`, d4foobar,
+ `RESTORE d4.* FROM $1 WITH into_db = 'd5'`, d4foobar,
)
sqlDB.ExpectErr(
t, "cannot RESTORE DATABASE from a backup of individual tables",
- `RESTORE DATABASE d4 FROM LATEST IN $1`, d4foo,
+ `RESTORE database d4 FROM $1`, d4foo,
)
- sqlDB.Exec(t, `RESTORE DATABASE d4 FROM LATEST IN $1`, d4star)
+ sqlDB.Exec(t, `RESTORE database d4 FROM $1`, d4star)
})
t.Run("db", func(t *testing.T) {
tcRestore := testcluster.StartTestCluster(t, singleNode, base.TestClusterArgs{ServerArgs: args})
defer tcRestore.Stopper().Stop(context.Background())
sqlDB := sqlutils.MakeSQLRunner(tcRestore.Conns[0])
- sqlDB.Exec(t, `RESTORE DATABASE data, d2, d3 FROM LATEST IN $1`, localFoo)
+ sqlDB.Exec(t, `RESTORE DATABASE data, d2, d3 FROM $1`, localFoo)
})
t.Run("db-exists", func(t *testing.T) {
@@ -4816,7 +4837,7 @@ func TestRestoreDatabaseVersusTable(t *testing.T) {
sqlDB := sqlutils.MakeSQLRunner(tcRestore.Conns[0])
sqlDB.Exec(t, `CREATE DATABASE data`)
- sqlDB.ExpectErr(t, "already exists", `RESTORE DATABASE data FROM LATEST IN $1`, localFoo)
+ sqlDB.ExpectErr(t, "already exists", `RESTORE DATABASE data FROM $1`, localFoo)
})
t.Run("tables", func(t *testing.T) {
@@ -4825,7 +4846,7 @@ func TestRestoreDatabaseVersusTable(t *testing.T) {
sqlDB := sqlutils.MakeSQLRunner(tcRestore.Conns[0])
sqlDB.Exec(t, `CREATE DATABASE data`)
- sqlDB.Exec(t, `RESTORE TABLE data.* FROM LATEST IN $1`, localFoo)
+ sqlDB.Exec(t, `RESTORE data.* FROM $1`, localFoo)
})
t.Run("tables-needs-db", func(t *testing.T) {
@@ -4833,7 +4854,7 @@ func TestRestoreDatabaseVersusTable(t *testing.T) {
defer tcRestore.Stopper().Stop(context.Background())
sqlDB := sqlutils.MakeSQLRunner(tcRestore.Conns[0])
- sqlDB.ExpectErr(t, "needs to exist", `RESTORE TABLE data.*, d4.* FROM LATEST IN $1`, localFoo)
+ sqlDB.ExpectErr(t, "needs to exist", `RESTORE data.*, d4.* FROM $1`, localFoo)
})
t.Run("into_db", func(t *testing.T) {
@@ -4843,7 +4864,7 @@ func TestRestoreDatabaseVersusTable(t *testing.T) {
sqlDB.ExpectErr(
t, `cannot use "into_db"`,
- `RESTORE DATABASE data FROM LATEST IN $1 WITH into_db = 'other'`, localFoo,
+ `RESTORE DATABASE data FROM $1 WITH into_db = 'other'`, localFoo,
)
})
}
@@ -4884,11 +4905,11 @@ func TestPointInTimeRecovery(t *testing.T) {
defer cleanupFn()
fullBackupDir := localFoo + "/full"
- sqlDB.Exec(t, `BACKUP TABLE data.* INTO $1`, fullBackupDir)
+ sqlDB.Exec(t, `BACKUP data.* INTO $1`, fullBackupDir)
sqlDB.Exec(t, `UPDATE data.bank SET balance = 2`)
- sqlDB.Exec(t, `BACKUP TABLE data.* INTO LATEST IN $1`, fullBackupDir)
+ sqlDB.Exec(t, `BACKUP data.* INTO LATEST IN $1`, fullBackupDir)
var beforeBadThingTs string
sqlDB.Exec(t, `UPDATE data.bank SET balance = 3`)
@@ -4910,11 +4931,11 @@ func TestPointInTimeRecovery(t *testing.T) {
sqlDB = sqlutils.MakeSQLRunner(sqlDB.DB)
recoveryDir := localFoo + "/new-backup"
sqlDB.Exec(t,
- fmt.Sprintf(`BACKUP TABLE data.* INTO $1 AS OF SYSTEM TIME '%s'`, beforeBadThingTs),
+ fmt.Sprintf(`BACKUP data.* INTO $1 AS OF SYSTEM TIME '%s'`, beforeBadThingTs),
recoveryDir,
)
sqlDB.Exec(t, `CREATE DATABASE newbackup`)
- sqlDB.Exec(t, `RESTORE TABLE data.* FROM LATEST IN $1 WITH into_db=newbackup`, recoveryDir)
+ sqlDB.Exec(t, `RESTORE data.* FROM LATEST IN $1 WITH into_db=newbackup`, recoveryDir)
// Some manual reconciliation of the data in data.bank and
// newbackup.bank could be done here by the operator.
@@ -4931,11 +4952,11 @@ func TestPointInTimeRecovery(t *testing.T) {
t.Run("recovery=inc-backup", func(t *testing.T) {
sqlDB = sqlutils.MakeSQLRunner(sqlDB.DB)
sqlDB.Exec(t,
- fmt.Sprintf(`BACKUP TABLE data.* INTO LATEST IN $1 AS OF SYSTEM TIME '%s'`, beforeBadThingTs),
+ fmt.Sprintf(`BACKUP data.* INTO LATEST IN $1 AS OF SYSTEM TIME '%s'`, beforeBadThingTs),
fullBackupDir)
sqlDB.Exec(t, `CREATE DATABASE incbackup`)
sqlDB.Exec(t,
- `RESTORE TABLE data.* FROM LATEST IN $1 WITH into_db=incbackup`,
+ `RESTORE data.* FROM LATEST IN $1 WITH into_db=incbackup`,
fullBackupDir)
// Some manual reconciliation of the data in data.bank and
@@ -4961,9 +4982,9 @@ func TestBackupRestoreDropDB(t *testing.T) {
sqlDB.Exec(t, `CREATE TABLE data.bank (i int)`)
sqlDB.Exec(t, `INSERT INTO data.bank VALUES (1)`)
- sqlDB.Exec(t, "BACKUP DATABASE data INTO $1", localFoo)
+ sqlDB.Exec(t, "BACKUP DATABASE data TO $1", localFoo)
sqlDB.Exec(t, "CREATE DATABASE data2")
- sqlDB.Exec(t, "RESTORE TABLE data.* FROM LATEST IN $1 WITH OPTIONS (into_db='data2')", localFoo)
+ sqlDB.Exec(t, "RESTORE data.* FROM $1 WITH OPTIONS (into_db='data2')", localFoo)
expected := sqlDB.QueryStr(t, `SELECT * FROM data.bank`)
sqlDB.CheckQueryResults(t, `SELECT * FROM data2.bank`, expected)
@@ -4983,9 +5004,9 @@ func TestBackupRestoreDropTable(t *testing.T) {
INSERT INTO data.bank VALUES (1);
`)
- sqlDB.Exec(t, "BACKUP DATABASE data INTO $1", localFoo)
+ sqlDB.Exec(t, "BACKUP DATABASE data TO $1", localFoo)
sqlDB.Exec(t, "CREATE DATABASE data2")
- sqlDB.Exec(t, "RESTORE TABLE data.* FROM LATEST IN $1 WITH OPTIONS (into_db='data2')", localFoo)
+ sqlDB.Exec(t, "RESTORE data.* FROM $1 WITH OPTIONS (into_db='data2')", localFoo)
expected := sqlDB.QueryStr(t, `SELECT * FROM data.bank`)
sqlDB.CheckQueryResults(t, `SELECT * FROM data2.bank`, expected)
@@ -5002,11 +5023,11 @@ func TestBackupRestoreIncrementalAddTable(t *testing.T) {
sqlDB.Exec(t, `CREATE TABLE data.t (s string PRIMARY KEY)`)
full := localFoo + "/full"
sqlDB.Exec(t, `INSERT INTO data.t VALUES ('before')`)
- sqlDB.Exec(t, `BACKUP TABLE data.*, data2.* INTO $1`, full)
+ sqlDB.Exec(t, `BACKUP data.*, data2.* INTO $1`, full)
sqlDB.Exec(t, `UPDATE data.t SET s = 'after'`)
sqlDB.Exec(t, `CREATE TABLE data2.t2 (i int)`)
- sqlDB.Exec(t, "BACKUP TABLE data.*, data2.* INTO $1", full)
+ sqlDB.Exec(t, "BACKUP data.*, data2.* INTO $1", full)
}
func TestBackupRestoreIncrementalAddTableMissing(t *testing.T) {
@@ -5021,13 +5042,13 @@ func TestBackupRestoreIncrementalAddTableMissing(t *testing.T) {
full := localFoo + "/full"
sqlDB.Exec(t, `INSERT INTO data.t VALUES ('before')`)
- sqlDB.Exec(t, `BACKUP TABLE data.* INTO $1`, full)
+ sqlDB.Exec(t, `BACKUP data.* INTO $1`, full)
sqlDB.Exec(t, `UPDATE data.t SET s = 'after'`)
sqlDB.Exec(t, `CREATE TABLE data2.t2 (i int)`)
sqlDB.ExpectErr(
t, "previous backup does not contain table",
- "BACKUP TABLE data.*, data2.* INTO LATEST IN $1", full,
+ "BACKUP data.*, data2.* INTO LATEST IN $1", full,
)
}
@@ -5060,8 +5081,7 @@ func TestBackupRestoreIncrementalDropTable(t *testing.T) {
full := localFoo + "/full"
sqlDB.Exec(t, `INSERT INTO data.t VALUES ('before')`)
- sqlDB.Exec(t, `BACKUP DATABASE data INTO $1`, full)
- beforeBackupPath := getFullBackupPaths(t, sqlDB, full)[0]
+ sqlDB.Exec(t, `BACKUP DATABASE data TO $1`, full)
sqlDB.Exec(t, `UPDATE data.t SET s = 'after'`)
sqlDB.Exec(t, `DROP TABLE data.t`)
@@ -5069,7 +5089,7 @@ func TestBackupRestoreIncrementalDropTable(t *testing.T) {
sqlDB.Exec(t, `DROP DATABASE data`)
// Restoring to backup before DROP restores t.
- sqlDB.Exec(t, `RESTORE DATABASE data FROM $1 IN $2`, beforeBackupPath, full)
+ sqlDB.Exec(t, `RESTORE DATABASE data FROM $1`, full)
sqlDB.Exec(t, `SELECT 1 FROM data.t LIMIT 0`)
sqlDB.Exec(t, `DROP DATABASE data`)
@@ -5088,18 +5108,18 @@ func TestFileIOLimits(t *testing.T) {
elsewhere := "nodelocal://1/../../blah"
- sqlDB.Exec(t, `BACKUP TABLE data.bank INTO $1`, localFoo)
+ sqlDB.Exec(t, `BACKUP data.bank TO $1`, localFoo)
sqlDB.ExpectErr(
t, "local file access to paths outside of external-io-dir is not allowed",
- `BACKUP TABLE data.bank INTO $1`, elsewhere,
+ `BACKUP data.bank TO $1`, elsewhere,
)
sqlDB.Exec(t, `DROP TABLE data.bank`)
- sqlDB.Exec(t, `RESTORE TABLE data.bank FROM LATEST IN $1`, localFoo)
+ sqlDB.Exec(t, `RESTORE data.bank FROM $1`, localFoo)
sqlDB.ExpectErr(
t, "local file access to paths outside of external-io-dir is not allowed",
- `RESTORE TABLE data.bank FROM LATEST IN $1`, elsewhere,
+ `RESTORE data.bank FROM $1`, elsewhere,
)
}
@@ -5176,26 +5196,26 @@ func TestDetachedRestore(t *testing.T) {
// Run a BACKUP.
sqlDB.Exec(t, `CREATE TABLE data.t (id INT, name STRING)`)
sqlDB.Exec(t, `INSERT INTO data.t VALUES (1, 'foo'), (2, 'bar')`)
- sqlDB.Exec(t, `BACKUP TABLE data.t INTO $1`, localFoo)
+ sqlDB.Exec(t, `BACKUP TABLE data.t TO $1`, localFoo)
sqlDB.Exec(t, `CREATE DATABASE test`)
// Running RESTORE under transaction requires DETACHED.
var jobID jobspb.JobID
err := crdb.ExecuteTx(ctx, db, nil /* txopts */, func(tx *gosql.Tx) error {
- return tx.QueryRow(`RESTORE TABLE t FROM LATEST IN $1 WITH INTO_DB=test`, localFoo).Scan(&jobID)
+ return tx.QueryRow(`RESTORE TABLE t FROM $1 WITH INTO_DB=test`, localFoo).Scan(&jobID)
})
require.True(t, testutils.IsError(err,
"RESTORE cannot be used inside a multi-statement transaction without DETACHED option"))
// Okay to run DETACHED RESTORE, even w/out explicit transaction.
- sqlDB.QueryRow(t, `RESTORE TABLE t FROM LATEST IN $1 WITH DETACHED, INTO_DB=test`,
+ sqlDB.QueryRow(t, `RESTORE TABLE t FROM $1 WITH DETACHED, INTO_DB=test`,
localFoo).Scan(&jobID)
waitForSuccessfulJob(t, tc, jobID)
sqlDB.Exec(t, `DROP TABLE test.t`)
// RESTORE again, under explicit transaction.
err = crdb.ExecuteTx(ctx, db, nil /* txopts */, func(tx *gosql.Tx) error {
- return tx.QueryRow(`RESTORE TABLE t FROM LATEST IN $1 WITH DETACHED, INTO_DB=test`, localFoo).Scan(&jobID)
+ return tx.QueryRow(`RESTORE TABLE t FROM $1 WITH DETACHED, INTO_DB=test`, localFoo).Scan(&jobID)
})
require.NoError(t, err)
waitForSuccessfulJob(t, tc, jobID)
@@ -5208,7 +5228,7 @@ func TestDetachedRestore(t *testing.T) {
tx, err := db.Begin()
require.NoError(t, err)
err = crdb.Execute(func() error {
- return tx.QueryRow(`RESTORE TABLE t FROM LATEST IN $1 WITH DETACHED, INTO_DB=test`, localFoo).Scan(&jobID)
+ return tx.QueryRow(`RESTORE TABLE t FROM $1 WITH DETACHED, INTO_DB=test`, localFoo).Scan(&jobID)
})
require.NoError(t, err)
require.NoError(t, tx.Rollback())
@@ -5231,14 +5251,14 @@ func TestBackupRestoreSequence(t *testing.T) {
origDB.Exec(t, `CREATE TABLE data.t (id INT PRIMARY KEY DEFAULT nextval('data.t_id_seq'), v text)`)
origDB.Exec(t, `INSERT INTO data.t (v) VALUES ('foo'), ('bar'), ('baz')`)
- origDB.Exec(t, `BACKUP DATABASE data INTO $1`, backupLoc)
+ origDB.Exec(t, `BACKUP DATABASE data TO $1`, backupLoc)
t.Run("restore both table & sequence to a new cluster", func(t *testing.T) {
tc := testcluster.StartTestCluster(t, singleNode, base.TestClusterArgs{ServerArgs: args})
defer tc.Stopper().Stop(context.Background())
newDB := sqlutils.MakeSQLRunner(tc.Conns[0])
- newDB.Exec(t, `RESTORE DATABASE data FROM LATEST IN $1`, backupLoc)
+ newDB.Exec(t, `RESTORE DATABASE data FROM $1`, backupLoc)
newDB.Exec(t, `USE data`)
// Verify that the db was restored correctly.
@@ -5289,10 +5309,10 @@ func TestBackupRestoreSequence(t *testing.T) {
newDB.ExpectErr(
t, "pq: cannot restore table \"t\" without referenced sequence \\d+ \\(or \"skip_missing_sequences\" option\\)",
- `RESTORE TABLE t FROM LATEST IN $1`, localFoo,
+ `RESTORE TABLE t FROM $1`, localFoo,
)
- newDB.Exec(t, `RESTORE TABLE t FROM LATEST IN $1 WITH OPTIONS (skip_missing_sequences)`, localFoo)
+ newDB.Exec(t, `RESTORE TABLE t FROM $1 WITH OPTIONS (skip_missing_sequences)`, localFoo)
// Verify that the table was restored correctly.
newDB.CheckQueryResults(t, `SELECT * FROM data.t`, [][]string{
@@ -5321,7 +5341,7 @@ func TestBackupRestoreSequence(t *testing.T) {
newDB.Exec(t, `USE data`)
// TODO(vilterp): create `RESTORE SEQUENCE` instead of `RESTORE TABLE`, and force
// people to use that?
- newDB.Exec(t, `RESTORE TABLE t_id_seq FROM LATEST IN $1`, backupLoc)
+ newDB.Exec(t, `RESTORE TABLE t_id_seq FROM $1`, backupLoc)
// Verify that the sequence value was restored.
newDB.CheckQueryResults(t, `SELECT last_value FROM data.t_id_seq`, [][]string{
@@ -5349,11 +5369,11 @@ func TestBackupRestoreSequencesInViews(t *testing.T) {
sqlDB.Exec(t, `CREATE VIEW v AS SELECT k FROM (SELECT nextval('s') AS k)`)
// Back up the database.
- sqlDB.Exec(t, `BACKUP DATABASE d INTO 'nodelocal://1/test/'`)
+ sqlDB.Exec(t, `BACKUP DATABASE d TO 'nodelocal://1/test/'`)
// Drop the database and restore into it.
sqlDB.Exec(t, `DROP DATABASE d`)
- sqlDB.Exec(t, `RESTORE DATABASE d FROM LATEST IN 'nodelocal://1/test/'`)
+ sqlDB.Exec(t, `RESTORE DATABASE d FROM 'nodelocal://1/test/'`)
// Check that the view is not corrupted.
sqlDB.CheckQueryResults(t, `SELECT * FROM d.v`, [][]string{{"1"}})
@@ -5383,12 +5403,12 @@ func TestBackupRestoreSequencesInViews(t *testing.T) {
sqlDB.Exec(t, `CREATE VIEW v AS (SELECT k FROM (SELECT nextval('s') AS k))`)
// Back up v and s.
- sqlDB.Exec(t, `BACKUP TABLE v, s INTO 'nodelocal://1/test/'`)
+ sqlDB.Exec(t, `BACKUP TABLE v, s TO 'nodelocal://1/test/'`)
// Drop v and s.
sqlDB.Exec(t, `DROP VIEW v`)
sqlDB.Exec(t, `DROP SEQUENCE s`)
// Restore v and s.
- sqlDB.Exec(t, `RESTORE TABLE s, v FROM LATEST IN 'nodelocal://1/test/'`)
+ sqlDB.Exec(t, `RESTORE TABLE s, v FROM 'nodelocal://1/test/'`)
sqlDB.CheckQueryResults(t, `SHOW CREATE VIEW d.v`, [][]string{{
"d.public.v", "CREATE VIEW public.v (\n\tk\n) AS " +
"(SELECT k FROM (SELECT nextval('public.s'::REGCLASS) AS k) AS \"?subquery1?\")",
@@ -5420,12 +5440,12 @@ func TestBackupRestoreSequencesInViews(t *testing.T) {
sqlDB.Exec(t, `CREATE VIEW v AS (SELECT k FROM (SELECT nextval('s') AS k))`)
// Back up v and drop it.
- sqlDB.Exec(t, `BACKUP TABLE v INTO 'nodelocal://1/test/'`)
+ sqlDB.Exec(t, `BACKUP TABLE v TO 'nodelocal://1/test/'`)
sqlDB.Exec(t, `DROP VIEW v`)
// Restore v.
sqlDB.ExpectErr(
t, "pq: cannot restore view \"v\" without restoring referenced table \\(or \"skip_missing_views\" option\\)",
- `RESTORE TABLE v FROM LATEST IN 'nodelocal://1/test/'`,
+ `RESTORE TABLE v FROM 'nodelocal://1/test/'`,
)
})
}
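A view that calls nextval has a hard dependency on the underlying sequence, so restoring one without the other fails unless the relevant skip option is supplied; the same dependency logic covers sequences referenced from column DEFAULT expressions. A sketch of the DEFAULT case under the single-URI syntax, assuming a fresh database d and a hypothetical 't_only' destination:

    sqlDB.Exec(t, `CREATE SEQUENCE d.s`)
    sqlDB.Exec(t, `CREATE TABLE d.t (id INT DEFAULT nextval('d.s'))`)
    sqlDB.Exec(t, `BACKUP TABLE d.t TO 'nodelocal://1/t_only'`)
    sqlDB.Exec(t, `DROP TABLE d.t`)
    sqlDB.Exec(t, `DROP SEQUENCE d.s`)
    // Restoring t alone would fail on the missing sequence; skip_missing_sequences
    // instead strips the DEFAULT expression from the restored table.
    sqlDB.Exec(t, `RESTORE TABLE d.t FROM 'nodelocal://1/t_only' WITH skip_missing_sequences`)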
@@ -5445,7 +5465,7 @@ func TestBackupRestoreSequenceOwnership(t *testing.T) {
origDB.Exec(t, `CREATE DATABASE d`)
origDB.Exec(t, `CREATE TABLE d.t(a int)`)
origDB.Exec(t, `CREATE SEQUENCE d.seq OWNED BY d.t.a`)
- origDB.Exec(t, `BACKUP DATABASE d INTO $1`, backupLoc)
+ origDB.Exec(t, `BACKUP DATABASE d TO $1`, backupLoc)
getTableDescriptorFromTestCluster := func(tc *testcluster.TestCluster, database string, table string) catalog.TableDescriptor {
srv := tc.ApplicationLayer(0)
@@ -5460,7 +5480,7 @@ func TestBackupRestoreSequenceOwnership(t *testing.T) {
newDB := sqlutils.MakeSQLRunner(tc.Conns[0])
- newDB.Exec(t, `RESTORE DATABASE d FROM LATEST IN $1`, backupLoc)
+ newDB.Exec(t, `RESTORE DATABASE d FROM $1`, backupLoc)
tableDesc := getTableDescriptorFromTestCluster(tc, "d", "t")
seqDesc := getTableDescriptorFromTestCluster(tc, "d", "seq")
@@ -5492,9 +5512,9 @@ func TestBackupRestoreSequenceOwnership(t *testing.T) {
newDB.Exec(t, `CREATE DATABASE d`)
newDB.Exec(t, `USE d`)
newDB.ExpectErr(t, `pq: cannot restore sequence "seq" without referenced owner`,
- `RESTORE TABLE seq FROM LATEST IN $1`, backupLoc)
+ `RESTORE TABLE seq FROM $1`, backupLoc)
- newDB.Exec(t, `RESTORE TABLE seq FROM LATEST IN $1 WITH skip_missing_sequence_owners`, backupLoc)
+ newDB.Exec(t, `RESTORE TABLE seq FROM $1 WITH skip_missing_sequence_owners`, backupLoc)
seqDesc := getTableDescriptorFromTestCluster(tc, "d", "seq")
require.False(t, seqDesc.GetSequenceOpts().HasOwner(), "unexpected owner of restored sequence.")
})
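OWNED BY ties a sequence's lifetime to a table column; it is descriptor metadata rather than data, and it is exactly what skip_missing_sequence_owners discards when the owner is missing. A sketch of the ownership semantics being exercised, assuming a database d as in the surrounding subtests:

    sqlDB.Exec(t, `CREATE TABLE d.t (a INT)`)
    sqlDB.Exec(t, `CREATE SEQUENCE d.seq OWNED BY d.t.a`)
    // Dropping the owning table takes the owned sequence down with it.
    sqlDB.Exec(t, `DROP TABLE d.t`)
    sqlDB.ExpectErr(t, `does not exist`, `SELECT nextval('d.seq')`)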
@@ -5514,11 +5534,11 @@ func TestBackupRestoreSequenceOwnership(t *testing.T) {
newDB.Exec(t, `CREATE DATABASE d`)
newDB.Exec(t, `USE d`)
newDB.ExpectErr(t, `pq: cannot restore sequence "seq" without referenced owner table`,
- `RESTORE TABLE seq FROM LATEST IN $1`, backupLoc)
+ `RESTORE TABLE seq FROM $1`, backupLoc)
newDB.ExpectErr(t, `pq: cannot restore table "t" without referenced sequence`,
- `RESTORE TABLE t FROM LATEST IN $1`, backupLoc)
- newDB.Exec(t, `RESTORE TABLE t FROM LATEST IN $1 WITH skip_missing_sequence_owners`, backupLoc)
+ `RESTORE TABLE t FROM $1`, backupLoc)
+ newDB.Exec(t, `RESTORE TABLE t FROM $1 WITH skip_missing_sequence_owners`, backupLoc)
tableDesc := getTableDescriptorFromTestCluster(tc, "d", "t")
@@ -5527,8 +5547,8 @@ func TestBackupRestoreSequenceOwnership(t *testing.T) {
)
newDB.ExpectErr(t, `pq: cannot restore sequence "seq" without referenced owner table`,
- `RESTORE TABLE seq FROM LATEST IN $1`, backupLoc)
- newDB.Exec(t, `RESTORE TABLE seq FROM LATEST IN $1 WITH skip_missing_sequence_owners`, backupLoc)
+ `RESTORE TABLE seq FROM $1`, backupLoc)
+ newDB.Exec(t, `RESTORE TABLE seq FROM $1 WITH skip_missing_sequence_owners`, backupLoc)
seqDesc := getTableDescriptorFromTestCluster(tc, "d", "seq")
require.False(t, seqDesc.GetSequenceOpts().HasOwner(), "unexpected sequence owner after restore")
@@ -5543,7 +5563,7 @@ func TestBackupRestoreSequenceOwnership(t *testing.T) {
newDB := sqlutils.MakeSQLRunner(tc.Conns[0])
newDB.Exec(t, `CREATE DATABASE restore_db`)
- newDB.Exec(t, `RESTORE TABLE d.* FROM LATEST IN $1 WITH into_db='restore_db'`, backupLoc)
+ newDB.Exec(t, `RESTORE d.* FROM $1 WITH into_db='restore_db'`, backupLoc)
tableDesc := getTableDescriptorFromTestCluster(tc, "restore_db", "t")
seqDesc := getTableDescriptorFromTestCluster(tc, "restore_db", "seq")
@@ -5577,7 +5597,7 @@ func TestBackupRestoreSequenceOwnership(t *testing.T) {
origDB.Exec(t, `CREATE SEQUENCE d3.seq OWNED BY d2.t.a`)
origDB.Exec(t, `CREATE SEQUENCE d3.seq2 OWNED BY d3.t.a`)
- origDB.Exec(t, `BACKUP DATABASE d2, d3 INTO $1`, backupLocD2D3)
+ origDB.Exec(t, `BACKUP DATABASE d2, d3 TO $1`, backupLocD2D3)
// When restoring a database that has a sequence which is owned by a table
// in another database, the user must supply the skip_missing_sequence_owners
// option.
@@ -5592,8 +5612,8 @@ func TestBackupRestoreSequenceOwnership(t *testing.T) {
newDB.ExpectErr(t, "pq: cannot restore sequence \"seq\" without referenced owner|"+
"pq: cannot restore table \"t\" without referenced sequence",
- `RESTORE DATABASE d2 FROM LATEST IN $1`, backupLocD2D3)
- newDB.Exec(t, `RESTORE DATABASE d2 FROM LATEST IN $1 WITH skip_missing_sequence_owners`, backupLocD2D3)
+ `RESTORE DATABASE d2 FROM $1`, backupLocD2D3)
+ newDB.Exec(t, `RESTORE DATABASE d2 FROM $1 WITH skip_missing_sequence_owners`, backupLocD2D3)
tableDesc := getTableDescriptorFromTestCluster(tc, "d2", "t")
require.Equal(t, 0, tableDesc.PublicColumns()[0].NumOwnsSequences(),
@@ -5602,8 +5622,8 @@ func TestBackupRestoreSequenceOwnership(t *testing.T) {
newDB.ExpectErr(t, "pq: cannot restore sequence \"seq\" without referenced owner|"+
"pq: cannot restore table \"t\" without referenced sequence",
- `RESTORE DATABASE d3 FROM LATEST IN $1`, backupLocD2D3)
- newDB.Exec(t, `RESTORE DATABASE d3 FROM LATEST IN $1 WITH skip_missing_sequence_owners`, backupLocD2D3)
+ `RESTORE DATABASE d3 FROM $1`, backupLocD2D3)
+ newDB.Exec(t, `RESTORE DATABASE d3 FROM $1 WITH skip_missing_sequence_owners`, backupLocD2D3)
seqDesc := getTableDescriptorFromTestCluster(tc, "d3", "seq")
require.False(t, seqDesc.GetSequenceOpts().HasOwner(), "unexpected sequence owner after restore")
@@ -5634,7 +5654,7 @@ func TestBackupRestoreSequenceOwnership(t *testing.T) {
newDB := sqlutils.MakeSQLRunner(tc.Conns[0])
- newDB.Exec(t, `RESTORE DATABASE d2, d3 FROM LATEST IN $1`, backupLocD2D3)
+ newDB.Exec(t, `RESTORE DATABASE d2, d3 FROM $1`, backupLocD2D3)
// The fact that d2.t owns d3.seq should be preserved.
tableDesc := getTableDescriptorFromTestCluster(tc, "d2", "t")
@@ -5701,7 +5721,7 @@ func TestBackupRestoreShowJob(t *testing.T) {
fullDir := getLatestFullDir(t, sqlDB, localFoo)
sqlDB.Exec(t, `CREATE DATABASE "data 2"`)
- sqlDB.Exec(t, `RESTORE TABLE data.bank FROM LATEST IN $1 WITH skip_missing_foreign_keys, into_db = $2`, localFoo, "data 2")
+ sqlDB.Exec(t, `RESTORE data.bank FROM LATEST IN $1 WITH skip_missing_foreign_keys, into_db = $2`, localFoo, "data 2")
// The "updating privileges" clause in the SELECT statement is for excluding jobs
// run by an unrelated startup migration.
// TODO (lucy): Update this if/when we decide to change how these jobs queued by
@@ -5729,9 +5749,9 @@ func TestBackupCreatedStats(t *testing.T) {
injectStats(t, sqlDB, "data.bank", "id")
injectStats(t, sqlDB, "data.foo", "a")
- sqlDB.Exec(t, `BACKUP TABLE data.bank, data.foo INTO $1 WITH revision_history`, localFoo)
+ sqlDB.Exec(t, `BACKUP data.bank, data.foo INTO $1 WITH revision_history`, localFoo)
sqlDB.Exec(t, `CREATE DATABASE "data 2"`)
- sqlDB.Exec(t, `RESTORE TABLE data.bank, data.foo FROM LATEST IN $1 WITH skip_missing_foreign_keys, into_db = $2`,
+ sqlDB.Exec(t, `RESTORE data.bank, data.foo FROM LATEST IN $1 WITH skip_missing_foreign_keys, into_db = $2`,
localFoo, "data 2")
sqlDB.CheckQueryResults(t, getStatsQuery(`"data 2".bank`),
@@ -5770,12 +5790,12 @@ func TestBackupRestoreSubsetCreatedStats(t *testing.T) {
bankStats := injectStats(t, sqlDB, "data.bank", "id")
injectStats(t, sqlDB, "data.foo", "a")
- sqlDB.Exec(t, `BACKUP TABLE data.bank, data.foo INTO $1 WITH revision_history`, localFoo)
+ sqlDB.Exec(t, `BACKUP data.bank, data.foo INTO $1 WITH revision_history`, localFoo)
// Clear the stats.
sqlDB.Exec(t, `DELETE FROM system.table_statistics WHERE true`)
sqlDB.Exec(t, `CREATE DATABASE "data 2"`)
sqlDB.Exec(t, `CREATE TABLE "data 2".foo (a INT)`)
- sqlDB.Exec(t, `RESTORE TABLE data.bank FROM LATEST IN $1 WITH skip_missing_foreign_keys, into_db = $2`,
+ sqlDB.Exec(t, `RESTORE data.bank FROM LATEST IN $1 WITH skip_missing_foreign_keys, into_db = $2`,
localFoo, "data 2")
// Ensure that bank's stats have been restored, but foo's have not.
@@ -5963,7 +5983,7 @@ func TestBackupRestoreCorruptedStatsIgnored(t *testing.T) {
var tableID int
sqlDB.QueryRow(t, `SELECT id FROM system.namespace WHERE name = 'bank'`).Scan(&tableID)
- sqlDB.Exec(t, `BACKUP TABLE data.bank INTO $1`, dest)
+ sqlDB.Exec(t, `BACKUP data.bank TO $1`, dest)
// Overwrite the stats file with some invalid data.
ctx := context.Background()
@@ -5984,14 +6004,14 @@ func TestBackupRestoreCorruptedStatsIgnored(t *testing.T) {
kmsEnv, &statsTable))
sqlDB.Exec(t, `CREATE DATABASE "data 2"`)
- sqlDB.Exec(t, fmt.Sprintf(`RESTORE TABLE data.bank FROM LATEST IN "%s" WITH skip_missing_foreign_keys, into_db = "%s"`,
+ sqlDB.Exec(t, fmt.Sprintf(`RESTORE data.bank FROM "%s" WITH skip_missing_foreign_keys, into_db = "%s"`,
dest, "data 2"))
// Delete the stats file to ensure a restore can succeed even if statistics do
// not exist.
require.NoError(t, store.Delete(ctx, backupinfo.BackupStatisticsFileName))
sqlDB.Exec(t, `CREATE DATABASE "data 3"`)
- sqlDB.Exec(t, fmt.Sprintf(`RESTORE TABLE data.bank FROM LATEST IN "%s" WITH skip_missing_foreign_keys, into_db = "%s"`,
+ sqlDB.Exec(t, fmt.Sprintf(`RESTORE data.bank FROM "%s" WITH skip_missing_foreign_keys, into_db = "%s"`,
dest, "data 3"))
}
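Because table statistics only feed the optimizer, RESTORE treats an unreadable or missing statistics file as if none were backed up rather than failing the job. A sketch of inspecting what, if anything, came across, reusing the getStatsQuery helper seen earlier (in this corrupted-stats scenario the result would be empty):

    rows := sqlDB.QueryStr(t, getStatsQuery(`"data 2".bank`))
    t.Logf("stats rows after restore: %v", rows)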
@@ -6009,21 +6029,21 @@ func TestBackupCreatedStatsFromIncrementalBackup(t *testing.T) {
// Create the 1st backup, with stats estimating 50 rows.
injectStatsWithRowCount(t, sqlDB, "data.bank", "id", 50 /* rowCount */)
- sqlDB.Exec(t, `BACKUP TABLE data.bank INTO $1 WITH revision_history`, localFoo)
+ sqlDB.Exec(t, `BACKUP data.bank INTO $1 WITH revision_history`, localFoo)
// Create the 2nd backup, with stats estimating 100 rows.
injectStatsWithRowCount(t, sqlDB, "data.bank", "id", 100 /* rowCount */)
statsBackup2 := sqlDB.QueryStr(t, getStatsQuery("data.bank"))
sqlDB.QueryRow(t, `SELECT cluster_logical_timestamp()`).Scan(&beforeTs) // Save time to restore to this point.
- sqlDB.Exec(t, `BACKUP TABLE data.bank INTO LATEST IN $1 WITH revision_history`, localFoo)
+ sqlDB.Exec(t, `BACKUP data.bank INTO LATEST IN $1 WITH revision_history`, localFoo)
// Create the 3rd backup, with stats estimating 500 rows.
injectStatsWithRowCount(t, sqlDB, "data.bank", "id", 500 /* rowCount */)
- sqlDB.Exec(t, `BACKUP TABLE data.bank INTO LATEST IN $1 WITH revision_history`, localFoo)
+ sqlDB.Exec(t, `BACKUP data.bank INTO LATEST IN $1 WITH revision_history`, localFoo)
// Restore the 2nd backup.
sqlDB.Exec(t, `CREATE DATABASE "data 2"`)
- sqlDB.Exec(t, fmt.Sprintf(`RESTORE TABLE data.bank FROM LATEST IN "%s" AS OF SYSTEM TIME %s WITH skip_missing_foreign_keys, into_db = "%s"`,
+ sqlDB.Exec(t, fmt.Sprintf(`RESTORE data.bank FROM LATEST IN "%s" AS OF SYSTEM TIME %s WITH skip_missing_foreign_keys, into_db = "%s"`,
localFoo, beforeTs, "data 2"))
// Expect the stats look as they did in the second backup.
@@ -6395,9 +6415,9 @@ func TestRestoreErrorPropagates(t *testing.T) {
runner.Exec(t, "CREATE TABLE foo ()")
runner.Exec(t, "CREATE DATABASE into_db")
url := `nodelocal://1/foo`
- runner.Exec(t, `BACKUP TABLE foo INTO '`+url+`'`)
+ runner.Exec(t, `BACKUP TABLE foo TO '`+url+`'`)
shouldFail.Store(true)
- _, err := db.Exec(`RESTORE TABLE foo FROM LATEST IN '` + url + `' WITH into_db = 'into_db'`)
+ _, err := db.Exec(`RESTORE TABLE foo FROM '` + url + `' WITH into_db = 'into_db'`)
// Expect to see the first job write failure.
require.Regexp(t, "boom 1", err)
}
@@ -6427,7 +6447,7 @@ func TestProtectedTimestampsFailDueToLimits(t *testing.T) {
// Creating the protected timestamp record should fail because it would
// exceed the byte limit. Ensure that we get the appropriate error.
- _, err := db.Exec(`BACKUP TABLE foo, bar INTO 'nodelocal://1/foo/byte-limit'`)
+ _, err := db.Exec(`BACKUP TABLE foo, bar TO 'nodelocal://1/foo/byte-limit'`)
require.ErrorContains(t, err, "pq: protectedts: limit exceeded")
// TODO(adityamaru): Remove in 22.2 once no records protect spans.
@@ -6451,7 +6471,7 @@ func TestProtectedTimestampsFailDueToLimits(t *testing.T) {
// Creating the protected timestamp record should fail because there are too
// many spans. Ensure that we get the appropriate error.
- _, err := db.Exec(`BACKUP TABLE foo, bar INTO 'nodelocal://1/foo/spans-limit'`)
+ _, err := db.Exec(`BACKUP TABLE foo, bar TO 'nodelocal://1/foo/spans-limit'`)
require.EqualError(t, err, "pq: protectedts: limit exceeded: 0+2 > 1 spans")
})
}
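Both subtests drive BACKUP into the protected-timestamp accounting limits: the record BACKUP writes to pin its read timestamp is refused once it would exceed the configured budget. A sketch of how such a limit might be lowered for a test, with the setting name assumed rather than taken from this file:

    // Assumed setting name; lowering it makes BACKUP's record creation trip the limit.
    _, err := db.Exec(`SET CLUSTER SETTING kv.protectedts.max_spans = 1`)
    require.NoError(t, err)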
@@ -6572,7 +6592,7 @@ INSERT INTO foo.bar VALUES (110), (210), (310), (410), (510)`)
return roachpb.Span{Key: mkKey(id, start), EndKey: mkKey(id, end)}
}
- tenant10.Exec(t, `BACKUP DATABASE foo INTO 'userfile://defaultdb.myfililes/test'`)
+ tenant10.Exec(t, `BACKUP DATABASE foo TO 'userfile://defaultdb.myfililes/test'`)
startingSpan := mkSpan(id1, "/Tenant/10/Table/:id/1", "/Tenant/10/Table/:id/2")
mu.Lock()
require.Equal(t, []string{startingSpan.String()}, mu.exportRequestSpans)
@@ -6581,7 +6601,7 @@ INSERT INTO foo.bar VALUES (110), (210), (310), (410), (510)`)
// Two ExportRequests with one resume span.
systemDB.Exec(t, `SET CLUSTER SETTING kv.bulk_sst.target_size='50b'`)
- tenant10.Exec(t, `BACKUP DATABASE foo INTO 'userfile://defaultdb.myfililes/test2'`)
+ tenant10.Exec(t, `BACKUP DATABASE foo TO 'userfile://defaultdb.myfililes/test2'`)
startingSpan = mkSpan(id1, "/Tenant/10/Table/:id/1", "/Tenant/10/Table/:id/2")
resumeSpan := mkSpan(id1, "/Tenant/10/Table/:id/1/510", "/Tenant/10/Table/:id/2")
mu.Lock()
@@ -6591,7 +6611,7 @@ INSERT INTO foo.bar VALUES (110), (210), (310), (410), (510)`)
// One ExportRequest for every KV.
systemDB.Exec(t, `SET CLUSTER SETTING kv.bulk_sst.target_size='10b'`)
- tenant10.Exec(t, `BACKUP DATABASE foo INTO 'userfile://defaultdb.myfililes/test3'`)
+ tenant10.Exec(t, `BACKUP DATABASE foo TO 'userfile://defaultdb.myfililes/test3'`)
var expected []string
for _, resume := range []exportResumePoint{
{mkSpan(id1, "/Tenant/10/Table/:id/1", "/Tenant/10/Table/:id/2"), withoutTS},
@@ -6621,7 +6641,7 @@ INSERT INTO baz.bar VALUES (110, 'a'), (210, 'b'), (310, 'c'), (410, 'd'), (510,
// Test mid-key breaks for the tenant to verify timestamps on resume.
tenant10.Exec(t, `UPDATE baz.bar SET v = 'z' WHERE i = 210`)
- tenant10.Exec(t, `BACKUP DATABASE baz INTO 'userfile://defaultdb.myfililes/test4' with revision_history`)
+ tenant10.Exec(t, `BACKUP DATABASE baz TO 'userfile://defaultdb.myfililes/test4' with revision_history`)
expected = nil
for _, resume := range []exportResumePoint{
{mkSpan(id2, "/Tenant/10/Table/3", "/Tenant/10/Table/4"), withoutTS},
@@ -6715,17 +6735,17 @@ func TestBackupRestoreInsideTenant(t *testing.T) {
t.Run("database-restore", func(t *testing.T) {
t.Run("into-same-tenant-id", func(t *testing.T) {
tenant10.Exec(t, `CREATE DATABASE foo2`)
- tenant10.Exec(t, `RESTORE TABLE foo.bar FROM LATEST IN $1 WITH into_db='foo2'`, httpAddr)
+ tenant10.Exec(t, `RESTORE foo.bar FROM LATEST IN $1 WITH into_db='foo2'`, httpAddr)
tenant10.CheckQueryResults(t, `SELECT * FROM foo2.bar`, tenant10.QueryStr(t, `SELECT * FROM foo.bar`))
})
t.Run("into-different-tenant-id", func(t *testing.T) {
tenant11.Exec(t, `CREATE DATABASE foo`)
- tenant11.Exec(t, `RESTORE TABLE foo.bar FROM LATEST IN $1`, httpAddr)
+ tenant11.Exec(t, `RESTORE foo.bar FROM LATEST IN $1`, httpAddr)
tenant11.CheckQueryResults(t, `SELECT * FROM foo.bar`, tenant10.QueryStr(t, `SELECT * FROM foo.bar`))
})
t.Run("into-system-tenant-id", func(t *testing.T) {
systemDB.Exec(t, `CREATE DATABASE foo2`)
- systemDB.Exec(t, `RESTORE TABLE foo.bar FROM LATEST IN $1 WITH into_db='foo2'`, httpAddr)
+ systemDB.Exec(t, `RESTORE foo.bar FROM LATEST IN $1 WITH into_db='foo2'`, httpAddr)
systemDB.CheckQueryResults(t, `SELECT * FROM foo2.bar`, tenant10.QueryStr(t, `SELECT * FROM foo.bar`))
})
})
@@ -6761,7 +6781,7 @@ func TestBackupRestoreInsideTenant(t *testing.T) {
t.Run("database-restore-into-tenant", func(t *testing.T) {
tenant10.Exec(t, `CREATE DATABASE data`)
- tenant10.Exec(t, `RESTORE TABLE data.bank FROM LATEST IN $1`, httpAddr)
+ tenant10.Exec(t, `RESTORE data.bank FROM LATEST IN $1`, httpAddr)
systemDB.CheckQueryResults(t, `SELECT * FROM data.bank`, tenant10.QueryStr(t, `SELECT * FROM data.bank`))
})
@@ -6888,17 +6908,17 @@ func TestBackupRestoreInsideMultiPodTenant(t *testing.T) {
t.Run("database-restore", func(t *testing.T) {
t.Run("into-same-tenant-id", func(t *testing.T) {
tenant10[0].Exec(t, `CREATE DATABASE foo2`)
- tenant10[0].Exec(t, `RESTORE TABLE foo.bar FROM LATEST IN $1 WITH into_db='foo2'`, httpAddr)
+ tenant10[0].Exec(t, `RESTORE foo.bar FROM LATEST IN $1 WITH into_db='foo2'`, httpAddr)
tenant10[0].CheckQueryResults(t, `SELECT * FROM foo2.bar`, tenant10[0].QueryStr(t, `SELECT * FROM foo.bar`))
})
t.Run("into-different-tenant-id", func(t *testing.T) {
tenant11[0].Exec(t, `CREATE DATABASE foo`)
- tenant11[0].Exec(t, `RESTORE TABLE foo.bar FROM LATEST IN $1`, httpAddr)
+ tenant11[0].Exec(t, `RESTORE foo.bar FROM LATEST IN $1`, httpAddr)
tenant11[0].CheckQueryResults(t, `SELECT * FROM foo.bar`, tenant10[0].QueryStr(t, `SELECT * FROM foo.bar`))
})
t.Run("into-system-tenant-id", func(t *testing.T) {
systemDB.Exec(t, `CREATE DATABASE foo2`)
- systemDB.Exec(t, `RESTORE TABLE foo.bar FROM LATEST IN $1 WITH into_db='foo2'`, httpAddr)
+ systemDB.Exec(t, `RESTORE foo.bar FROM LATEST IN $1 WITH into_db='foo2'`, httpAddr)
systemDB.CheckQueryResults(t, `SELECT * FROM foo2.bar`, tenant10[0].QueryStr(t, `SELECT * FROM foo.bar`))
})
})
@@ -6934,7 +6954,7 @@ func TestBackupRestoreInsideMultiPodTenant(t *testing.T) {
t.Run("database-restore-into-tenant", func(t *testing.T) {
tenant10[0].Exec(t, `CREATE DATABASE data`)
- tenant10[0].Exec(t, `RESTORE TABLE data.bank FROM LATEST IN $1`, httpAddr)
+ tenant10[0].Exec(t, `RESTORE data.bank FROM LATEST IN $1`, httpAddr)
systemDB.CheckQueryResults(t, `SELECT * FROM data.bank`, tenant10[0].QueryStr(t, `SELECT * FROM data.bank`))
})
})
@@ -7063,8 +7083,8 @@ func TestBackupRestoreTenant(t *testing.T) {
(0, 'tenant_cost_model.read_payload_cost_per_mebibyte', '123', 'f'),
(10, 'tenant_cost_model.write_payload_cost_per_mebibyte', '456', 'f')`)
- systemDB.Exec(t, `BACKUP TABLE system.users INTO 'nodelocal://1/users'`)
- systemDB.CheckQueryResults(t, `SELECT manifest->>'tenants' FROM [SHOW BACKUP FROM LATEST IN 'nodelocal://1/users' WITH as_json]`, [][]string{{"[]"}})
+ systemDB.Exec(t, `BACKUP system.users TO 'nodelocal://1/users'`)
+ systemDB.CheckQueryResults(t, `SELECT manifest->>'tenants' FROM [SHOW BACKUP 'nodelocal://1/users' WITH as_json]`, [][]string{{"[]"}})
_, conn11 := serverutils.StartTenant(t, srv, base.TestTenantArgs{TenantID: roachpb.MustMakeTenantID(11)})
defer conn11.Close()
@@ -7083,31 +7103,30 @@ func TestBackupRestoreTenant(t *testing.T) {
systemDB.QueryRow(t, `SELECT cluster_logical_timestamp()`).Scan(&ts2)
// BACKUP tenant 10 at ts1, before it created bar2.
- systemDB.Exec(t, fmt.Sprintf(`BACKUP TENANT 10 INTO 'nodelocal://1/t10' AS OF SYSTEM TIME %s`, ts1))
- ts1BackupPath := getFullBackupPaths(t, systemDB, "nodelocal://1/t10")[0]
+ systemDB.Exec(t, `BACKUP TENANT 10 TO 'nodelocal://1/t10' AS OF SYSTEM TIME `+ts1)
// Also create a full cluster backup. It should contain the tenant.
- systemDB.Exec(t, fmt.Sprintf("BACKUP INTO 'nodelocal://1/clusterwide' AS OF SYSTEM TIME %s WITH include_all_virtual_clusters", ts1))
+ systemDB.Exec(t, fmt.Sprintf("BACKUP TO 'nodelocal://1/clusterwide' AS OF SYSTEM TIME %s WITH include_all_virtual_clusters", ts1))
// Incrementally back up tenant 10 again, capturing changes up to ts2.
- systemDB.Exec(t, fmt.Sprintf(`BACKUP TENANT 10 INTO 'nodelocal://1/t10' AS OF SYSTEM TIME %s`, ts2))
+ systemDB.Exec(t, `BACKUP TENANT 10 TO 'nodelocal://1/t10' AS OF SYSTEM TIME `+ts2)
// Run full cluster backup incrementally to ts2 as well.
- systemDB.Exec(t, fmt.Sprintf("BACKUP INTO 'nodelocal://1/clusterwide' AS OF SYSTEM TIME %s WITH include_all_virtual_clusters", ts2))
+ systemDB.Exec(t, fmt.Sprintf("BACKUP TO 'nodelocal://1/clusterwide' AS OF SYSTEM TIME %s WITH include_all_virtual_clusters", ts2))
- systemDB.Exec(t, `BACKUP TENANT 11 INTO 'nodelocal://1/t11'`)
- systemDB.Exec(t, `BACKUP TENANT 20 INTO 'nodelocal://1/t20'`)
+ systemDB.Exec(t, `BACKUP TENANT 11 TO 'nodelocal://1/t11'`)
+ systemDB.Exec(t, `BACKUP TENANT 20 TO 'nodelocal://1/t20'`)
t.Run("non-existent", func(t *testing.T) {
- systemDB.ExpectErr(t, "tenant 123 does not exist", `BACKUP TENANT 123 INTO 'nodelocal://1/t1'`)
- systemDB.ExpectErr(t, "tenant 21 does not exist", `BACKUP TENANT 21 INTO 'nodelocal://1/t20'`)
- systemDB.ExpectErr(t, "tenant 21 not in backup", `RESTORE TENANT 21 FROM LATEST IN 'nodelocal://1/t20'`)
- systemDB.ExpectErr(t, "file does not exist", `RESTORE TENANT 21 FROM LATEST IN 'nodelocal://1/t21'`)
+ systemDB.ExpectErr(t, "tenant 123 does not exist", `BACKUP TENANT 123 TO 'nodelocal://1/t1'`)
+ systemDB.ExpectErr(t, "tenant 21 does not exist", `BACKUP TENANT 21 TO 'nodelocal://1/t20'`)
+ systemDB.ExpectErr(t, "tenant 21 not in backup", `RESTORE TENANT 21 FROM 'nodelocal://1/t20'`)
+ systemDB.ExpectErr(t, "file does not exist", `RESTORE TENANT 21 FROM 'nodelocal://1/t21'`)
})
t.Run("invalid", func(t *testing.T) {
- systemDB.ExpectErr(t, "invalid tenant ID", `BACKUP TENANT 0 INTO 'nodelocal://1/z'`)
- systemDB.ExpectErr(t, "tenant 123 does not exist", `BACKUP TENANT 123 INTO 'nodelocal://1/z'`)
- systemDB.ExpectErr(t, "syntax error", `BACKUP TENANT system INTO 'nodelocal://1/z'`)
+ systemDB.ExpectErr(t, "invalid tenant ID", `BACKUP TENANT 0 TO 'nodelocal://1/z'`)
+ systemDB.ExpectErr(t, "tenant 123 does not exist", `BACKUP TENANT 123 TO 'nodelocal://1/z'`)
+ systemDB.ExpectErr(t, "syntax error", `BACKUP TENANT system TO 'nodelocal://1/z'`)
})
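BACKUP TENANT runs from the system tenant and captures the named tenant's entire keyspace, while RESTORE TENANT recreates the tenant record along with its data, which is why both the ID validation and the not-in-backup checks above are enforced up front. A round-trip sketch under the single-URI syntax, destination hypothetical:

    systemDB.Exec(t, `BACKUP TENANT 10 TO 'nodelocal://1/t10-copy'`)
    // On a cluster without tenant 10, this recreates the tenant and its data.
    restoreDB.Exec(t, `RESTORE TENANT 10 FROM 'nodelocal://1/t10-copy'`)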
t.Run("restore-tenant10-to-latest", func(t *testing.T) {
@@ -7129,7 +7148,7 @@ func TestBackupRestoreTenant(t *testing.T) {
`{}`,
},
})
- restoreDB.Exec(t, `RESTORE TENANT 10 FROM LATEST IN 'nodelocal://1/t10'`)
+ restoreDB.Exec(t, `RESTORE TENANT 10 FROM 'nodelocal://1/t10'`)
restoreDB.CheckQueryResults(t,
`SELECT id, active, name, data_state, service_mode,
crdb_internal.pb_to_json('cockroach.multitenant.ProtoInfo', info)->'capabilities',
@@ -7211,7 +7230,7 @@ func TestBackupRestoreTenant(t *testing.T) {
require.NoError(t, err)
require.Equal(t, []kv.KeyValue{}, rows)
- restoreDB.Exec(t, `RESTORE TENANT 10 FROM LATEST IN 'nodelocal://1/t10'`)
+ restoreDB.Exec(t, `RESTORE TENANT 10 FROM 'nodelocal://1/t10'`)
restoreDB.CheckQueryResults(t,
`select id, active, name, data_state, service_mode,
crdb_internal.pb_to_json('cockroach.multitenant.ProtoInfo', info)->'capabilities'
@@ -7267,7 +7286,7 @@ func TestBackupRestoreTenant(t *testing.T) {
`{}`,
},
})
- restoreDB.Exec(t, `RESTORE TENANT 10 FROM LATEST IN 'nodelocal://1/t10'`)
+ restoreDB.Exec(t, `RESTORE TENANT 10 FROM 'nodelocal://1/t10'`)
restoreDB.CheckQueryResults(t,
`select id, active, name, data_state, service_mode,
crdb_internal.pb_to_json('cockroach.multitenant.ProtoInfo', info)->'capabilities'
@@ -7311,7 +7330,7 @@ func TestBackupRestoreTenant(t *testing.T) {
`{}`,
},
})
- restoreDB.Exec(t, `RESTORE TENANT 10 FROM LATEST IN 'nodelocal://1/clusterwide'`)
+ restoreDB.Exec(t, `RESTORE TENANT 10 FROM 'nodelocal://1/clusterwide'`)
restoreDB.CheckQueryResults(t,
`select id, active, name, data_state, service_mode,
crdb_internal.pb_to_json('cockroach.multitenant.ProtoInfo', info)->'capabilities'
@@ -7357,7 +7376,7 @@ func TestBackupRestoreTenant(t *testing.T) {
defer restoreTC.Stopper().Stop(ctx)
restoreDB := sqlutils.MakeSQLRunner(restoreTC.Conns[0])
- restoreDB.Exec(t, `RESTORE TENANT 10 FROM $1 IN 'nodelocal://1/t10'`, ts1BackupPath)
+ restoreDB.Exec(t, `RESTORE TENANT 10 FROM 'nodelocal://1/t10' AS OF SYSTEM TIME `+ts1)
restoreDB.CheckQueryResults(t,
`select id, name,
@@ -7393,7 +7412,7 @@ func TestBackupRestoreTenant(t *testing.T) {
defer restoreTC.Stopper().Stop(ctx)
restoreDB := sqlutils.MakeSQLRunner(restoreTC.Conns[0])
- restoreDB.Exec(t, `RESTORE TENANT 20 FROM LATEST IN 'nodelocal://1/t20'`)
+ restoreDB.Exec(t, `RESTORE TENANT 20 FROM 'nodelocal://1/t20'`)
tenantID := roachpb.MustMakeTenantID(20)
if err := restoreTC.Server(0).TenantController().WaitForTenantReadiness(ctx, tenantID); err != nil {
@@ -7424,11 +7443,11 @@ func TestClientDisconnect(t *testing.T) {
}{
{
jobType: "BACKUP",
- jobCommand: fmt.Sprintf("BACKUP INTO '%s'", localFoo),
+ jobCommand: fmt.Sprintf("BACKUP TO '%s'", localFoo),
},
{
jobType: "RESTORE",
- jobCommand: fmt.Sprintf("RESTORE TABLE data.* FROM LATEST IN '%s' WITH into_db='%s'", localFoo, restoreDB),
+ jobCommand: fmt.Sprintf("RESTORE data.* FROM '%s' WITH into_db='%s'", localFoo, restoreDB),
},
}
@@ -7481,7 +7500,7 @@ func TestClientDisconnect(t *testing.T) {
if testCase.jobType == "RESTORE" {
close(allowResponse)
sqlDB.Exec(t, fmt.Sprintf("CREATE DATABASE %s", restoreDB))
- sqlDB.Exec(t, "BACKUP INTO $1", localFoo)
+ sqlDB.Exec(t, "BACKUP TO $1", localFoo)
// Reset the channels. There will be a request on the gotRequest channel
// due to the backup.
allowResponse = make(chan struct{})
@@ -7583,7 +7602,7 @@ func TestBackupExportRequestTimeout(t *testing.T) {
// Backup should go through the motions of attempting to run a high-priority
// export request, but since the intent was laid down by a high-priority txn,
// the request should hang. The timeout should save us in this case.
- _, err := sqlSessions[1].DB.ExecContext(ctx, "BACKUP TABLE data.bank INTO 'nodelocal://1/timeout'")
+ _, err := sqlSessions[1].DB.ExecContext(ctx, "BACKUP data.bank TO 'nodelocal://1/timeout'")
require.Regexp(t,
`running distributed backup to export.*/Table/\d+/.*\: context deadline exceeded`,
err.Error())
@@ -7619,7 +7638,7 @@ func TestBackupDoesNotHangOnIntent(t *testing.T) {
}
// Back up the table in which we have our intent.
- _, err = sqlDB.DB.ExecContext(ctx, "BACKUP TABLE data.bank INTO 'nodelocal://1/intent'")
+ _, err = sqlDB.DB.ExecContext(ctx, "BACKUP data.bank TO 'nodelocal://1/intent'")
require.NoError(t, err)
// Observe that the backup aborted our txn.
@@ -7653,9 +7672,9 @@ CREATE TABLE db.table (k INT PRIMARY KEY, v db.typ);
`)
// Back up the database, drop it, and restore into it.
- sqlDB.Exec(t, `BACKUP DATABASE db INTO 'nodelocal://1/test/1'`)
+ sqlDB.Exec(t, `BACKUP DATABASE db TO 'nodelocal://1/test/1'`)
sqlDB.Exec(t, `DROP DATABASE db`)
- sqlDB.ExpectErr(t, "boom", `RESTORE DATABASE db FROM LATEST IN 'nodelocal://1/test/1'`)
+ sqlDB.ExpectErr(t, "boom", `RESTORE DATABASE db FROM 'nodelocal://1/test/1'`)
sqlDB.CheckQueryResults(t, `SELECT count(*) FROM system.namespace WHERE name = 'typ'`, [][]string{{"0"}})
// Back up a database with a user-defined schema.
@@ -7667,9 +7686,9 @@ CREATE TABLE db.s.table (k INT PRIMARY KEY, v db.s.typ);
`)
// Back up the database, drop it, and restore into it.
- sqlDB.Exec(t, `BACKUP DATABASE db INTO 'nodelocal://1/test/2'`)
+ sqlDB.Exec(t, `BACKUP DATABASE db TO 'nodelocal://1/test/2'`)
sqlDB.Exec(t, `DROP DATABASE db`)
- sqlDB.ExpectErr(t, "boom", `RESTORE DATABASE db FROM LATEST IN 'nodelocal://1/test/2'`)
+ sqlDB.ExpectErr(t, "boom", `RESTORE DATABASE db FROM 'nodelocal://1/test/2'`)
sqlDB.CheckQueryResults(t, `SELECT count(*) FROM system.namespace WHERE name = 'typ'`, [][]string{{"0"}})
}
@@ -7702,11 +7721,11 @@ CREATE TYPE sc.typ AS ENUM ('hello');
ALTER TYPE sc.typ ADD VALUE 'hi';
`)
// Back up the database.
- sqlDB.Exec(t, `BACKUP DATABASE d INTO 'nodelocal://1/test/'`)
+ sqlDB.Exec(t, `BACKUP DATABASE d TO 'nodelocal://1/test/'`)
// Drop the database and restore into it.
sqlDB.Exec(t, `DROP DATABASE d`)
- sqlDB.Exec(t, `RESTORE DATABASE d FROM LATEST IN 'nodelocal://1/test/'`)
+ sqlDB.Exec(t, `RESTORE DATABASE d FROM 'nodelocal://1/test/'`)
codec := tc.ApplicationLayer(0).Codec()
dbDesc := desctestutils.TestingGetDatabaseDescriptor(kvDB, codec, "d")
@@ -7787,7 +7806,7 @@ CREATE FUNCTION f() RETURNS INT AS $$ SELECT 1 $$ LANGUAGE SQL;
`)
// Back up the database.
- sqlDB.Exec(t, `BACKUP DATABASE d INTO 'nodelocal://1/test/'`)
+ sqlDB.Exec(t, `BACKUP DATABASE d TO 'nodelocal://1/test/'`)
// Drop the database and restore into it.
sqlDB.Exec(t, `DROP DATABASE d`)
@@ -7795,7 +7814,7 @@ CREATE FUNCTION f() RETURNS INT AS $$ SELECT 1 $$ LANGUAGE SQL;
beforePublishingNotif, continueNotif := initBackfillNotification()
g := ctxgroup.WithContext(ctx)
g.GoCtx(func(ctx context.Context) error {
- if _, err := sqlDB.DB.ExecContext(ctx, `RESTORE DATABASE d FROM LATEST IN 'nodelocal://1/test/'`); err != nil {
+ if _, err := sqlDB.DB.ExecContext(ctx, `RESTORE DATABASE d FROM 'nodelocal://1/test/'`); err != nil {
t.Fatal(err)
}
return nil
@@ -7901,7 +7920,7 @@ CREATE TYPE sc.typ AS ENUM ('hello');
`)
// Back up the database.
- sqlDB.Exec(t, `BACKUP DATABASE d INTO 'nodelocal://1/test/'`)
+ sqlDB.Exec(t, `BACKUP DATABASE d TO 'nodelocal://1/test/'`)
// Drop the database.
sqlDB.Exec(t, `DROP DATABASE d`)
@@ -7912,7 +7931,7 @@ CREATE TYPE sc.typ AS ENUM ('hello');
beforePublishingNotif, continueNotif := initBackfillNotification()
g := ctxgroup.WithContext(ctx)
g.GoCtx(func(ctx context.Context) error {
- if _, err := sqlDB.DB.ExecContext(ctx, `RESTORE TABLE d.* FROM LATEST IN 'nodelocal://1/test/' WITH into_db='newdb'`); err != nil {
+ if _, err := sqlDB.DB.ExecContext(ctx, `RESTORE d.* FROM 'nodelocal://1/test/' WITH into_db='newdb'`); err != nil {
t.Fatal(err)
}
return nil
@@ -8008,7 +8027,7 @@ CREATE TABLE d.sc.tb (x d.sc.typ);
`)
// Back up the table.
- sqlDB.Exec(t, `BACKUP TABLE d.sc.tb INTO 'nodelocal://1/test/'`)
+ sqlDB.Exec(t, `BACKUP TABLE d.sc.tb TO 'nodelocal://1/test/'`)
// Drop the table and the type.
sqlDB.Exec(t, `DROP TABLE d.sc.tb`)
@@ -8022,7 +8041,7 @@ CREATE TABLE d.sc.tb (x d.sc.typ);
beforePublishingNotif, continueNotif := initBackfillNotification()
g := ctxgroup.WithContext(ctx)
g.GoCtx(func(ctx context.Context) error {
- if _, err := sqlDB.DB.ExecContext(ctx, `RESTORE TABLE d.sc.tb FROM LATEST IN 'nodelocal://1/test/' WITH into_db = 'newdb'`); err != nil {
+ if _, err := sqlDB.DB.ExecContext(ctx, `RESTORE TABLE d.sc.tb FROM 'nodelocal://1/test/' WITH into_db = 'newdb'`); err != nil {
t.Fatal(err)
}
return nil
@@ -8143,7 +8162,7 @@ func TestCleanupDoesNotDeleteParentsWithChildObjects(t *testing.T) {
`)
// Back up the database.
- sqlDB.Exec(t, `BACKUP DATABASE d INTO 'nodelocal://1/test/'`)
+ sqlDB.Exec(t, `BACKUP DATABASE d TO 'nodelocal://1/test/'`)
// Drop the database and restore into it.
sqlDB.Exec(t, `DROP DATABASE d`)
@@ -8151,7 +8170,7 @@ func TestCleanupDoesNotDeleteParentsWithChildObjects(t *testing.T) {
afterPublishNotif, continueNotif := notifyAfterPublishing()
g := ctxgroup.WithContext(ctx)
g.GoCtx(func(ctx context.Context) error {
- _, err := sqlDB.DB.ExecContext(ctx, `RESTORE DATABASE d FROM LATEST IN 'nodelocal://1/test/'`)
+ _, err := sqlDB.DB.ExecContext(ctx, `RESTORE DATABASE d FROM 'nodelocal://1/test/'`)
require.Regexp(t, "injected error", err)
return nil
})
@@ -8205,7 +8224,7 @@ func TestCleanupDoesNotDeleteParentsWithChildObjects(t *testing.T) {
`)
// Back up the database.
- sqlDB.Exec(t, `BACKUP DATABASE d INTO 'nodelocal://1/test/'`)
+ sqlDB.Exec(t, `BACKUP DATABASE d TO 'nodelocal://1/test/'`)
// Drop the database and restore into it.
sqlDB.Exec(t, `DROP DATABASE d`)
@@ -8213,7 +8232,7 @@ func TestCleanupDoesNotDeleteParentsWithChildObjects(t *testing.T) {
afterPublishNotif, continueNotif := notifyAfterPublishing()
g := ctxgroup.WithContext(ctx)
g.GoCtx(func(ctx context.Context) error {
- _, err := sqlDB.DB.ExecContext(ctx, `RESTORE DATABASE d FROM LATEST IN 'nodelocal://1/test/'`)
+ _, err := sqlDB.DB.ExecContext(ctx, `RESTORE DATABASE d FROM 'nodelocal://1/test/'`)
require.Regexp(t, "injected error", err)
return nil
})
@@ -8272,7 +8291,7 @@ func TestCleanupDoesNotDeleteParentsWithChildObjects(t *testing.T) {
`)
// Back up the database.
- sqlDB.Exec(t, `BACKUP DATABASE olddb INTO 'nodelocal://1/test/'`)
+ sqlDB.Exec(t, `BACKUP DATABASE olddb TO 'nodelocal://1/test/'`)
// Drop the database.
sqlDB.Exec(t, `DROP DATABASE olddb`)
@@ -8285,7 +8304,7 @@ func TestCleanupDoesNotDeleteParentsWithChildObjects(t *testing.T) {
g.GoCtx(func(ctx context.Context) error {
conn, err := tc.Conns[0].Conn(ctx)
require.NoError(t, err)
- _, err = conn.ExecContext(ctx, `RESTORE TABLE olddb.* FROM LATEST IN 'nodelocal://1/test/' WITH into_db='newdb'`)
+ _, err = conn.ExecContext(ctx, `RESTORE olddb.* FROM 'nodelocal://1/test/' WITH into_db='newdb'`)
require.Regexp(t, "injected error", err)
return nil
})
@@ -8335,7 +8354,7 @@ func TestCleanupDoesNotDeleteParentsWithChildObjects(t *testing.T) {
`)
// Back up the database.
- sqlDB.Exec(t, `BACKUP DATABASE d INTO 'nodelocal://1/test/'`)
+ sqlDB.Exec(t, `BACKUP DATABASE d TO 'nodelocal://1/test/'`)
// Drop the database and restore into it.
sqlDB.Exec(t, `DROP DATABASE d`)
@@ -8343,7 +8362,7 @@ func TestCleanupDoesNotDeleteParentsWithChildObjects(t *testing.T) {
afterPublishNotif, continueNotif := notifyAfterPublishing()
g := ctxgroup.WithContext(ctx)
g.GoCtx(func(ctx context.Context) error {
- _, err := sqlDB.DB.ExecContext(ctx, `RESTORE DATABASE d FROM LATEST IN 'nodelocal://1/test/'`)
+ _, err := sqlDB.DB.ExecContext(ctx, `RESTORE DATABASE d FROM 'nodelocal://1/test/'`)
require.Regexp(t, "injected error", err)
return nil
})
@@ -8481,16 +8500,14 @@ func TestRestoringAcrossVersions(t *testing.T) {
sqlDB.Exec(t, `SET CLUSTER SETTING backup.write_metadata_with_external_ssts.enabled=true`)
sqlDB.Exec(t, `CREATE DATABASE r1`)
- sqlDB.Exec(t, `BACKUP DATABASE r1 INTO 'nodelocal://1/cross_version'`)
+ sqlDB.Exec(t, `BACKUP DATABASE r1 TO 'nodelocal://1/cross_version'`)
sqlDB.Exec(t, `DROP DATABASE r1`)
// Prove we can restore.
- sqlDB.Exec(t, `RESTORE DATABASE r1 FROM LATEST IN 'nodelocal://1/cross_version'`)
+ sqlDB.Exec(t, `RESTORE DATABASE r1 FROM 'nodelocal://1/cross_version'`)
sqlDB.Exec(t, `DROP DATABASE r1`)
- backupPath := getFullBackupPaths(t, sqlDB, "nodelocal://1/cross_version")[0]
-
// Load/deserialize the manifest so we can mess with it.
- manifestPath := filepath.Join(rawDir, "cross_version", backupPath, backupbase.BackupMetadataName)
+ manifestPath := filepath.Join(rawDir, "cross_version", backupbase.BackupMetadataName)
manifestData, err := os.ReadFile(manifestPath)
require.NoError(t, err)
manifestData, err = backupinfo.DecompressData(context.Background(), mon.NewStandaloneUnlimitedAccount(), manifestData)
@@ -8511,7 +8528,7 @@ func TestRestoringAcrossVersions(t *testing.T) {
t.Run("restore-same-version", func(t *testing.T) {
// Prove we can restore a backup taken on our current version.
- sqlDB.Exec(t, `RESTORE DATABASE r1 FROM LATEST IN 'nodelocal://1/cross_version'`)
+ sqlDB.Exec(t, `RESTORE DATABASE r1 FROM 'nodelocal://1/cross_version'`)
sqlDB.Exec(t, `DROP DATABASE r1`)
})
@@ -8521,7 +8538,7 @@ func TestRestoringAcrossVersions(t *testing.T) {
// Verify we reject it.
sqlDB.ExpectErr(t, "backup from version 2147483647.1 is newer than current version",
- `RESTORE DATABASE r1 FROM LATEST IN 'nodelocal://1/cross_version'`)
+ `RESTORE DATABASE r1 FROM 'nodelocal://1/cross_version'`)
})
t.Run("restore-older-major-version", func(t *testing.T) {
@@ -8535,7 +8552,7 @@ func TestRestoringAcrossVersions(t *testing.T) {
// Verify we reject it.
sqlDB.ExpectErr(t,
fmt.Sprintf("backup from version %s is older than the minimum restorable version", minSupportedVersion.String()),
- `RESTORE DATABASE r1 FROM LATEST IN 'nodelocal://1/cross_version'`)
+ `RESTORE DATABASE r1 FROM 'nodelocal://1/cross_version'`)
})
t.Run("restore-min-binary-version", func(t *testing.T) {
@@ -8544,7 +8561,7 @@ func TestRestoringAcrossVersions(t *testing.T) {
// version policy.
minSupportedVersion := tc.ApplicationLayer(0).ClusterSettings().Version.MinSupportedVersion()
setManifestClusterVersion(minSupportedVersion)
- sqlDB.Exec(t, `RESTORE DATABASE r1 FROM LATEST IN 'nodelocal://1/cross_version'`)
+ sqlDB.Exec(t, `RESTORE DATABASE r1 FROM 'nodelocal://1/cross_version'`)
sqlDB.Exec(t, `DROP DATABASE r1`)
})
@@ -8554,14 +8571,14 @@ func TestRestoringAcrossVersions(t *testing.T) {
// Verify we reject it.
sqlDB.ExpectErr(t, "the backup is from a version older than our minimum restorable version",
- `RESTORE DATABASE r1 FROM LATEST IN 'nodelocal://1/cross_version'`)
+ `RESTORE DATABASE r1 FROM 'nodelocal://1/cross_version'`)
})
t.Run("restore-nil-version-after-pause", func(t *testing.T) {
minSupportedVersion := tc.ApplicationLayer(0).ClusterSettings().Version.MinSupportedVersion()
setManifestClusterVersion(minSupportedVersion)
sqlDB.Exec(t, "SET CLUSTER SETTING jobs.debug.pausepoints = 'restore.before_load_descriptors_from_backup'")
var jobID int
- sqlDB.QueryRow(t, `RESTORE DATABASE r1 FROM LATEST IN 'nodelocal://1/cross_version' with DETACHED`).Scan(&jobID)
+ sqlDB.QueryRow(t, `RESTORE DATABASE r1 FROM 'nodelocal://1/cross_version' with DETACHED`).Scan(&jobID)
jobutils.WaitForJobToPause(t, sqlDB, jobspb.JobID(jobID))
setManifestClusterVersion(roachpb.Version{})
@@ -8575,7 +8592,7 @@ func TestRestoringAcrossVersions(t *testing.T) {
})
t.Run("restore-nil-version-unsafe", func(t *testing.T) {
var jobID int
- sqlDB.QueryRow(t, `RESTORE DATABASE r1 FROM LATEST IN 'nodelocal://1/cross_version' with DETACHED, UNSAFE_RESTORE_INCOMPATIBLE_VERSION`).Scan(&jobID)
+ sqlDB.QueryRow(t, `RESTORE DATABASE r1 FROM 'nodelocal://1/cross_version' with DETACHED, UNSAFE_RESTORE_INCOMPATIBLE_VERSION`).Scan(&jobID)
jobutils.WaitForJobToSucceed(t, sqlDB, jobspb.JobID(jobID))
payload := jobutils.GetJobPayload(t, sqlDB, jobspb.JobID(jobID))
@@ -8596,17 +8613,17 @@ func TestManifestBitFlip(t *testing.T) {
sqlDB.Exec(t, `CREATE DATABASE r1; CREATE DATABASE r2; CREATE DATABASE r3;`)
const checksumError = "checksum mismatch"
t.Run("unencrypted", func(t *testing.T) {
- sqlDB.Exec(t, `BACKUP DATABASE data INTO 'nodelocal://1/bit_flip_unencrypted'`)
+ sqlDB.Exec(t, `BACKUP DATABASE data TO 'nodelocal://1/bit_flip_unencrypted'`)
flipBitInManifests(t, rawDir)
sqlDB.ExpectErr(t, checksumError,
- `RESTORE TABLE data.* FROM LATEST IN 'nodelocal://1/bit_flip_unencrypted' WITH into_db='r1'`)
+ `RESTORE data.* FROM 'nodelocal://1/bit_flip_unencrypted' WITH into_db='r1'`)
})
t.Run("encrypted", func(t *testing.T) {
- sqlDB.Exec(t, `BACKUP DATABASE data INTO 'nodelocal://1/bit_flip_encrypted' WITH encryption_passphrase = 'abc'`)
+ sqlDB.Exec(t, `BACKUP DATABASE data TO 'nodelocal://1/bit_flip_encrypted' WITH encryption_passphrase = 'abc'`)
flipBitInManifests(t, rawDir)
sqlDB.ExpectErr(t, checksumError,
- `RESTORE TABLE data.* FROM LATEST IN 'nodelocal://1/bit_flip_encrypted' WITH encryption_passphrase = 'abc', into_db = 'r3'`)
+ `RESTORE data.* FROM 'nodelocal://1/bit_flip_encrypted' WITH encryption_passphrase = 'abc', into_db = 'r3'`)
})
}
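flipBitInManifests leans on the manifest checksum: any single-bit change makes the stored checksum mismatch on read, whether or not the backup is encrypted. A generic sketch of such a corruption step (not the actual helper), assuming manifestPath points at one manifest file:

    data, err := os.ReadFile(manifestPath)
    require.NoError(t, err)
    data[len(data)/2] ^= 1 // flip a single bit near the middle
    require.NoError(t, os.WriteFile(manifestPath, data, 0644))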
@@ -8670,11 +8687,11 @@ func TestRestoreJobEventLogging(t *testing.T) {
sqlDB.Exec(t, `CREATE DATABASE r1`)
sqlDB.Exec(t, `CREATE TABLE r1.foo (id INT)`)
sqlDB.Exec(t, `INSERT INTO r1.foo VALUES (1), (2), (3)`)
- sqlDB.Exec(t, `BACKUP DATABASE r1 INTO 'nodelocal://1/eventlogging'`)
+ sqlDB.Exec(t, `BACKUP DATABASE r1 TO 'nodelocal://1/eventlogging'`)
sqlDB.Exec(t, `DROP DATABASE r1`)
beforeRestore := timeutil.Now()
- restoreQuery := `RESTORE DATABASE r1 FROM LATEST IN 'nodelocal://1/eventlogging'`
+ restoreQuery := `RESTORE DATABASE r1 FROM 'nodelocal://1/eventlogging'`
var jobID int64
var unused interface{}
@@ -8789,13 +8806,12 @@ func TestBackupOnlyPublicIndexes(t *testing.T) {
// First take a full backup.
fullBackup := localFoo + "/full"
- sqlDB.Exec(t, `BACKUP DATABASE data INTO $1 WITH revision_history`, fullBackup)
+ sqlDB.Exec(t, `BACKUP DATABASE data TO $1 WITH revision_history`, fullBackup)
var dataBankTableID descpb.ID
sqlDB.QueryRow(t, `SELECT 'data.bank'::regclass::int`).
Scan(&dataBankTableID)
- backupPath := getFullBackupPaths(t, sqlDB, fullBackup)[0]
- fullBackupSpans := getSpansFromManifest(ctx, t, locationToDir(fullBackup+backupPath))
+ fullBackupSpans := getSpansFromManifest(ctx, t, locationToDir(fullBackup))
require.Equal(t, 1, len(fullBackupSpans))
require.Regexp(t, fmt.Sprintf(".*/Table/%d/{1-2}", dataBankTableID), fullBackupSpans[0].String())
@@ -8809,8 +8825,8 @@ func TestBackupOnlyPublicIndexes(t *testing.T) {
return errors.Wrap(err, "creating index")
})
- incLoc := localFoo + "/inc"
- var inc1Ts, inc2Ts, inc3Ts string
+ inc1Loc := localFoo + "/inc1"
+ inc2Loc := localFoo + "/inc2"
g.Go(func() error {
defer backfillBlockers[0].allowToProceed()
@@ -8819,13 +8835,8 @@ func TestBackupOnlyPublicIndexes(t *testing.T) {
// Take an incremental backup and assert that it doesn't contain any
// data. The only added data was from the backfill, which should not
// be included because it consists of historical writes.
- sqlDB.QueryRow(t, "SELECT cluster_logical_timestamp()").Scan(&inc1Ts)
- _, err := sqlDB.DB.ExecContext(ctx,
- fmt.Sprintf(
- `BACKUP DATABASE data INTO $1 AS OF SYSTEM TIME %s WITH revision_history, incremental_location=$2`,
- inc1Ts,
- ),
- fullBackup, incLoc)
+ _, err := sqlDB.DB.ExecContext(ctx, `BACKUP DATABASE data TO $1 INCREMENTAL FROM $2 WITH revision_history`,
+ inc1Loc, fullBackup)
return errors.Wrap(err, "running inc 1 backup")
})
@@ -8833,50 +8844,47 @@ func TestBackupOnlyPublicIndexes(t *testing.T) {
defer backfillBlockers[1].allowToProceed()
backfillBlockers[1].waitUntilBlocked()
- sqlDB.QueryRow(t, "SELECT cluster_logical_timestamp()").Scan(&inc2Ts)
// Take an incremental backup and assert that it doesn't contain any
- // data. The only added data was from the backfill, which should not
- // be included because they are historical writes.
- _, err := sqlDB.DB.ExecContext(ctx,
- fmt.Sprintf(
- `BACKUP DATABASE data INTO $1 AS OF SYSTEM TIME %s WITH revision_history, incremental_location=$2`,
- inc2Ts,
- ),
- fullBackup, incLoc)
+ // data. The only added data was from the backfill, which should not be
+ // included because it consists of historical writes.
+ _, err := sqlDB.DB.ExecContext(ctx, `BACKUP DATABASE data TO $1 INCREMENTAL FROM $2, $3 WITH revision_history`,
+ inc2Loc, fullBackup, inc1Loc)
return errors.Wrap(err, "running inc 2 backup")
})
// Wait for the backfill and incremental backup to complete.
require.NoError(t, g.Wait())
+ inc1Spans := getSpansFromManifest(ctx, t, locationToDir(inc1Loc))
+ require.Equalf(t, 0, len(inc1Spans), "expected inc1 to not have any data, found %v", inc1Spans)
+
+ inc2Spans := getSpansFromManifest(ctx, t, locationToDir(inc2Loc))
+ require.Equalf(t, 0, len(inc2Spans), "expected inc2 to not have any data, found %v", inc2Spans)
+
// Take another incremental backup that should only contain the newly added
// index.
- sqlDB.QueryRow(t, "SELECT cluster_logical_timestamp()").Scan(&inc3Ts)
- sqlDB.Exec(t,
- fmt.Sprintf(
- `BACKUP DATABASE data INTO $1 AS OF SYSTEM TIME %s WITH revision_history, incremental_location=$2`,
- inc3Ts,
- ),
- fullBackup, incLoc)
+ inc3Loc := localFoo + "/inc3"
+ sqlDB.Exec(t, `BACKUP DATABASE data TO $1 INCREMENTAL FROM $2, $3, $4 WITH revision_history`,
+ inc3Loc, fullBackup, inc1Loc, inc2Loc)
+ inc3Spans := getSpansFromManifest(ctx, t, locationToDir(inc3Loc))
+ require.Equal(t, 1, len(inc3Spans))
+ require.Regexp(t, fmt.Sprintf(".*/Table/%d/{2-3}", dataBankTableID), inc3Spans[0].String())
// Drop the index.
sqlDB.Exec(t, `DROP INDEX new_balance_idx`)
// Take another incremental backup.
- sqlDB.Exec(t, `BACKUP DATABASE data INTO $1 WITH revision_history, incremental_location=$2`,
- fullBackup, incLoc)
+ inc4Loc := localFoo + "/inc4"
+ sqlDB.Exec(t, `BACKUP DATABASE data TO $1 INCREMENTAL FROM $2, $3, $4, $5 WITH revision_history`,
+ inc4Loc, fullBackup, inc1Loc, inc2Loc, inc3Loc)
numAccountsStr := strconv.Itoa(numAccounts)
- // Restore the entire chain before dropping the index and check that we got the full indexes.
+ // Restore the entire chain and check that we got the full indexes.
{
sqlDB.Exec(t, `CREATE DATABASE restoredb;`)
- sqlDB.Exec(t,
- fmt.Sprintf(
- `RESTORE TABLE data.bank FROM LATEST IN $1 AS OF SYSTEM TIME %s WITH into_db='restoredb', incremental_location=$2`,
- inc3Ts,
- ),
- fullBackup, incLoc)
+ sqlDB.Exec(t, `RESTORE data.bank FROM $1, $2, $3, $4 WITH into_db='restoredb'`,
+ fullBackup, inc1Loc, inc2Loc, inc3Loc)
sqlDB.CheckQueryResults(t, `SELECT count(*) FROM restoredb.bank@[1]`, [][]string{{numAccountsStr}})
sqlDB.CheckQueryResults(t, `SELECT count(*) FROM restoredb.bank@[2]`, [][]string{{numAccountsStr}})
kvCount, err := getKVCount(ctx, kvDB, codec, "restoredb", "bank")
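The old-style chain above is fully explicit: each incremental backup names its own destination plus every prior layer after INCREMENTAL FROM, and RESTORE then lists the whole chain oldest-first. Condensed into one sketch with hypothetical nodelocal URIs:

    sqlDB.Exec(t, `BACKUP DATABASE data TO 'nodelocal://1/full'`)
    sqlDB.Exec(t, `BACKUP DATABASE data TO 'nodelocal://1/i1' INCREMENTAL FROM 'nodelocal://1/full'`)
    sqlDB.Exec(t, `BACKUP DATABASE data TO 'nodelocal://1/i2' INCREMENTAL FROM 'nodelocal://1/full', 'nodelocal://1/i1'`)
    // RESTORE consumes the layers in the order they were produced.
    sqlDB.Exec(t, `RESTORE DATABASE data FROM 'nodelocal://1/full', 'nodelocal://1/i1', 'nodelocal://1/i2'`)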
@@ -8892,12 +8900,8 @@ func TestBackupOnlyPublicIndexes(t *testing.T) {
{
blockBackfills = make(chan struct{}) // block the synthesized schema change job
sqlDB.Exec(t, `CREATE DATABASE restoredb;`)
- sqlDB.Exec(t,
- fmt.Sprintf(
- `RESTORE TABLE data.bank FROM LATEST IN $1 AS OF SYSTEM TIME %s WITH into_db='restoredb', incremental_location=$2`,
- inc2Ts,
- ),
- fullBackup, incLoc)
+ sqlDB.Exec(t, `RESTORE data.bank FROM $1, $2, $3 WITH into_db='restoredb';`,
+ fullBackup, inc1Loc, inc2Loc)
sqlDB.CheckQueryResults(t, `SELECT count(*) FROM restoredb.bank@[1]`, [][]string{{numAccountsStr}})
sqlDB.ExpectErr(t, "index .* not found", `SELECT count(*) FROM restoredb.bank@[2]`)
@@ -8931,8 +8935,8 @@ func TestBackupOnlyPublicIndexes(t *testing.T) {
{
blockBackfills = make(chan struct{}) // block the synthesized schema change job
sqlDB.Exec(t, `CREATE DATABASE restoredb;`)
- sqlDB.Exec(t, `RESTORE TABLE data.bank FROM LATEST IN $1 WITH into_db='restoredb', incremental_location=$2`,
- fullBackup, incLoc)
+ sqlDB.Exec(t, `RESTORE data.bank FROM $1, $2, $3, $4, $5 WITH into_db='restoredb';`,
+ fullBackup, inc1Loc, inc2Loc, inc3Loc, inc4Loc)
sqlDB.CheckQueryResults(t, `SELECT count(*) FROM restoredb.bank@[1]`, [][]string{{numAccountsStr}})
sqlDB.ExpectErr(t, "index .* not found", `SELECT count(*) FROM restoredb.bank@[2]`)
@@ -9087,7 +9091,7 @@ DROP INDEX idx_3;
// Wait for the old schema to exceed the GC window.
time.Sleep(2 * time.Second)
- sqlDB.Exec(t, `BACKUP TABLE test.t INTO 'nodelocal://1/backup_test' WITH revision_history`)
+ sqlDB.Exec(t, `BACKUP test.t TO 'nodelocal://1/backup_test' WITH revision_history`)
}
// TestDroppedDescriptorRevisionAndSystemDBIDClash is a regression test for a
@@ -9115,17 +9119,17 @@ func TestDroppedDescriptorRevisionAndSystemDBIDClash(t *testing.T) {
defer cleanupFn()
sqlDB.Exec(t, `CREATE TABLE foo (id INT);`)
- sqlDB.Exec(t, `BACKUP INTO 'nodelocal://1/foo' WITH revision_history;`)
+ sqlDB.Exec(t, `BACKUP TO 'nodelocal://1/foo' WITH revision_history;`)
sqlDB.Exec(t, `DROP TABLE foo;`)
var aost string
sqlDB.QueryRow(t, "SELECT cluster_logical_timestamp()").Scan(&aost)
- sqlDB.Exec(t, `BACKUP INTO $1 WITH revision_history`, localFoo)
+ sqlDB.Exec(t, `BACKUP TO $1 WITH revision_history`, localFoo)
_, sqlDBRestore, cleanupEmptyCluster := backupRestoreTestSetupEmpty(t, singleNode, tempDir,
InitManualReplication, base.TestClusterArgs{})
defer cleanupEmptyCluster()
- sqlDBRestore.Exec(t, "RESTORE FROM LATEST IN $1 AS OF SYSTEM TIME "+aost, localFoo)
+ sqlDBRestore.Exec(t, "RESTORE FROM $1 AS OF SYSTEM TIME "+aost, localFoo)
}
// TestRestoreNewDatabaseName tests the new_db_name optional feature for single database
// restores.
@@ -9144,7 +9148,7 @@ func TestRestoreNewDatabaseName(t *testing.T) {
for i := 0; i < 10; i++ {
sqlDB.Exec(t, `INSERT INTO fkdb.fk (ind) VALUES ($1)`, i)
}
- sqlDB.Exec(t, `BACKUP INTO $1`, localFoo)
+ sqlDB.Exec(t, `BACKUP TO $1`, localFoo)
// Ensure restore fails with new_db_name on cluster, table, and multiple database restores
t.Run("new_db_name syntax checks", func(t *testing.T) {
@@ -9155,16 +9159,16 @@ func TestRestoreNewDatabaseName(t *testing.T) {
sqlDB.ExpectErr(t, expectedErr, "RESTORE DATABASE fkdb, "+
"data FROM $1 with new_db_name = 'new_fkdb'", localFoo)
- sqlDB.ExpectErr(t, expectedErr, "RESTORE TABLE fkdb.fk FROM LATEST IN $1 with new_db_name = 'new_fkdb'",
+ sqlDB.ExpectErr(t, expectedErr, "RESTORE TABLE fkdb.fk FROM $1 with new_db_name = 'new_fkdb'",
localFoo)
})
// Should fail because the 'fkdb' database is still in the cluster.
sqlDB.ExpectErr(t, `database "fkdb" already exists`,
- "RESTORE DATABASE fkdb FROM LATEST IN $1", localFoo)
+ "RESTORE DATABASE fkdb FROM $1", localFoo)
// Should pass because 'new_fkdb' is not in cluster
- sqlDB.Exec(t, "RESTORE DATABASE fkdb FROM LATEST IN $1 WITH new_db_name = 'new_fkdb'", localFoo)
+ sqlDB.Exec(t, "RESTORE DATABASE fkdb FROM $1 WITH new_db_name = 'new_fkdb'", localFoo)
// Verify restored database is in cluster with new name
sqlDB.CheckQueryResults(t,
@@ -9177,7 +9181,7 @@ func TestRestoreNewDatabaseName(t *testing.T) {
// Should fail because we just restored 'new_fkdb' into the cluster.
sqlDB.ExpectErr(t, `database "new_fkdb" already exists`,
- "RESTORE DATABASE fkdb FROM LATEST IN $1 WITH new_db_name = 'new_fkdb'", localFoo)
+ "RESTORE DATABASE fkdb FROM $1 WITH new_db_name = 'new_fkdb'", localFoo)
}
// TestRestoreRemappingOfExistingUDTInColExpr is a regression test for a nil
// pointer dereference.
@@ -9194,11 +9198,11 @@ func TestRestoreRemappingOfExistingUDTInColExpr(t *testing.T) {
sqlDB.Exec(t, `CREATE TYPE status AS ENUM ('open', 'closed', 'inactive');`)
sqlDB.Exec(t, `CREATE TABLE foo (id INT PRIMARY KEY, what status default 'open');`)
- sqlDB.Exec(t, `BACKUP DATABASE data INTO 'nodelocal://1/foo';`)
+ sqlDB.Exec(t, `BACKUP DATABASE data TO 'nodelocal://1/foo';`)
sqlDB.Exec(t, `DROP TABLE foo CASCADE;`)
sqlDB.Exec(t, `DROP TYPE status;`)
sqlDB.Exec(t, `CREATE TYPE status AS ENUM ('open', 'closed', 'inactive');`)
- sqlDB.Exec(t, `RESTORE TABLE foo FROM LATEST IN 'nodelocal://1/foo';`)
+ sqlDB.Exec(t, `RESTORE TABLE foo FROM 'nodelocal://1/foo';`)
}
// TestGCDropIndexSpanExpansion is a regression test for
@@ -9298,16 +9302,16 @@ CREATE DATABASE db;
CREATE SCHEMA db.s;
`)
- sqlDB.Exec(t, `BACKUP DATABASE db INTO 'nodelocal://1/test/1'`)
+ sqlDB.Exec(t, `BACKUP DATABASE db TO 'nodelocal://1/test/1'`)
sqlDB.Exec(t, `DROP DATABASE db`)
- sqlDB.ExpectErr(t, "boom", `RESTORE DATABASE db FROM LATEST IN 'nodelocal://1/test/1'`)
+ sqlDB.ExpectErr(t, "boom", `RESTORE DATABASE db FROM 'nodelocal://1/test/1'`)
sqlDB.Exec(t, `
CREATE DATABASE db;
CREATE SCHEMA db.s;
`)
- sqlDB.Exec(t, `BACKUP DATABASE db INTO 'nodelocal://1/test/2'`)
+ sqlDB.Exec(t, `BACKUP DATABASE db TO 'nodelocal://1/test/2'`)
}
// TestBackupRestoreSeparateIncrementalPrefix tests that a backup/restore round
// trip works when incrementals are written under a separate incremental_location prefix.
@@ -9364,9 +9368,9 @@ func TestBackupRestoreSeparateIncrementalPrefix(t *testing.T) {
sqlDB.Exec(t, `INSERT INTO fkdb.fk (ind) VALUES ($1)`, 200)
- sib := fmt.Sprintf("BACKUP DATABASE fkdb INTO LATEST IN %s WITH incremental_location=%s", dest, inc)
+ sib := fmt.Sprintf("BACKUP DATABASE fkdb INTO LATEST IN %s WITH incremental_location = %s", dest, inc)
sqlDB.Exec(t, sib)
- sir := fmt.Sprintf("RESTORE DATABASE fkdb FROM LATEST IN %s WITH new_db_name = 'inc_fkdb', incremental_location=%s", dest, inc)
+ sir := fmt.Sprintf("RESTORE DATABASE fkdb FROM LATEST IN %s WITH new_db_name = 'inc_fkdb', incremental_location = %s", dest, inc)
sqlDB.Exec(t, sir)
ib := fmt.Sprintf("BACKUP DATABASE fkdb INTO LATEST IN %s", dest)
@@ -9474,7 +9478,7 @@ func TestExcludeDataFromBackupAndRestore(t *testing.T) {
require.Len(t, sqlDB.QueryStr(t, `SELECT * FROM data2.baz`), 0)
before := atomic.LoadInt64(&exportReqsAtomic)
- sqlDB.Exec(t, `BACKUP TABLE data.foo INTO $1`, localFoo+"/tbl")
+ sqlDB.Exec(t, `BACKUP data.foo TO $1`, localFoo+"/tbl")
after := atomic.LoadInt64(&exportReqsAtomic)
require.Equal(t, before, after)
}
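exclude_data_from_backup makes BACKUP skip the table's row data entirely, which is what the unchanged export-request counter above demonstrates. A sketch of the other half of the contract, restoring such a backup (into_db target assumed):

    // The schema comes back, but none of the excluded row data does.
    sqlDB.Exec(t, `RESTORE TABLE data.foo FROM $1 WITH into_db = 'data2'`, localFoo+"/tbl")
    require.Len(t, sqlDB.QueryStr(t, `SELECT * FROM data2.foo`), 0)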
@@ -9561,7 +9565,7 @@ func TestExportRequestBelowGCThresholdOnDataExcludedFromBackup(t *testing.T) {
return nil
})
- _, err = conn.Exec(fmt.Sprintf("BACKUP TABLE foo INTO $1 AS OF SYSTEM TIME '%s'", tsBefore), localFoo+"/fail")
+ _, err = conn.Exec(fmt.Sprintf("BACKUP TABLE foo TO $1 AS OF SYSTEM TIME '%s'", tsBefore), localFoo+"/fail")
testutils.IsError(err, "must be after replica GC threshold")
_, err = conn.Exec(`ALTER TABLE foo SET (exclude_data_from_backup = true)`)
@@ -9578,7 +9582,7 @@ func TestExportRequestBelowGCThresholdOnDataExcludedFromBackup(t *testing.T) {
return true, nil
})
- _, err = conn.Exec(fmt.Sprintf("BACKUP TABLE foo INTO $1 AS OF SYSTEM TIME '%s'", tsBefore), localFoo+"/succeed")
+ _, err = conn.Exec(fmt.Sprintf("BACKUP TABLE foo TO $1 AS OF SYSTEM TIME '%s'", tsBefore), localFoo+"/succeed")
require.NoError(t, err)
}
@@ -9819,9 +9823,9 @@ func TestBackupRestoreSystemUsers(t *testing.T) {
sqlDB.Exec(t, `GRANT app_role TO test_role;`) // 'test_role' is a member of 'app_role'
sqlDB.Exec(t, `GRANT admin, app_role TO app; GRANT test_role TO test`)
sqlDB.Exec(t, `CREATE DATABASE db; CREATE TABLE db.foo (ind INT)`)
- sqlDB.Exec(t, `BACKUP INTO $1`, localFoo+"/1")
- sqlDB.Exec(t, `BACKUP DATABASE db INTO $1`, localFoo+"/2")
- sqlDB.Exec(t, `BACKUP TABLE system.users INTO $1`, localFoo+"/3")
+ sqlDB.Exec(t, `BACKUP TO $1`, localFoo+"/1")
+ sqlDB.Exec(t, `BACKUP DATABASE db TO $1`, localFoo+"/2")
+ sqlDB.Exec(t, `BACKUP TABLE system.users TO $1`, localFoo+"/3")
// User 'test' exists in both clusters but 'app' only exists in the backup
sqlDBRestore.Exec(t, `CREATE USER test`)
@@ -9831,7 +9835,7 @@ func TestBackupRestoreSystemUsers(t *testing.T) {
sqlDBRestore.Exec(t, `CREATE DATABASE db1; CREATE DATABASE db2; CREATE DATABASE db3`)
t.Run("system users", func(t *testing.T) {
- sqlDBRestore.Exec(t, "RESTORE SYSTEM USERS FROM LATEST IN $1", localFoo+"/1")
+ sqlDBRestore.Exec(t, "RESTORE SYSTEM USERS FROM $1", localFoo+"/1")
// Role 'app_role' and user 'app' will be added, and 'app' will be granted 'app_role'.
// User 'test' will remain untouched, with no roles granted.
@@ -9861,13 +9865,13 @@ func TestBackupRestoreSystemUsers(t *testing.T) {
t.Run("restore-from-backup-with-no-system-users", func(t *testing.T) {
sqlDBRestore.ExpectErr(t, "cannot restore system users as no system.users table in the backup",
- "RESTORE SYSTEM USERS FROM LATEST IN $1", localFoo+"/2")
+ "RESTORE SYSTEM USERS FROM $1", localFoo+"/2")
})
_, sqlDBRestore1, cleanupEmptyCluster1 := backupRestoreTestSetupEmpty(t, singleNode, tempDir, InitManualReplication, base.TestClusterArgs{})
defer cleanupEmptyCluster1()
t.Run("restore-from-backup-with-no-system-role-members", func(t *testing.T) {
- sqlDBRestore1.Exec(t, "RESTORE SYSTEM USERS FROM LATEST IN $1", localFoo+"/3")
+ sqlDBRestore1.Exec(t, "RESTORE SYSTEM USERS FROM $1", localFoo+"/3")
sqlDBRestore1.CheckQueryResults(t, `SELECT "role", member, "isAdmin" FROM system.role_members`, [][]string{
{"admin", "root", "true"},
})
@@ -9894,7 +9898,7 @@ func TestBackupRestoreSystemUsers(t *testing.T) {
// Create testuser and verify that the system user ids are
// allocated properly in the restore.
sqlDBRestore2.Exec(t, "CREATE USER testuser")
- sqlDBRestore2.Exec(t, "RESTORE SYSTEM USERS FROM LATEST IN $1", localFoo+"/3")
+ sqlDBRestore2.Exec(t, "RESTORE SYSTEM USERS FROM $1", localFoo+"/3")
sqlDBRestore2.CheckQueryResults(t, `SELECT role, member, "isAdmin" FROM system.role_members`, [][]string{
{"admin", "root", "true"},
})
@@ -9932,11 +9936,11 @@ func TestUserfileNormalizationIncrementalShowBackup(t *testing.T) {
_, sqlDB, _, cleanupFn := backupRestoreTestSetup(t, singleNode, numAccounts, InitManualReplication)
defer cleanupFn()
- query := fmt.Sprintf("BACKUP bank INTO %s", userfile)
+ query := fmt.Sprintf("BACKUP bank TO %s", userfile)
sqlDB.Exec(t, query)
- query = fmt.Sprintf("BACKUP bank INTO %s", userfile)
+ query = fmt.Sprintf("BACKUP bank TO %s", userfile)
sqlDB.Exec(t, query)
- query = fmt.Sprintf("SHOW BACKUP FROM LATEST IN %s", userfile)
+ query = fmt.Sprintf("SHOW BACKUP %s", userfile)
sqlDB.Exec(t, query)
}
@@ -9998,7 +10002,7 @@ func TestBackupRestoreOldIncrementalDefault(t *testing.T) {
sqlDB.Exec(t, `INSERT INTO fkdb.fk (ind) VALUES ($1)`, 200)
- sib := fmt.Sprintf("BACKUP DATABASE fkdb INTO LATEST IN %s WITH incremental_location=%s", dest, inc)
+ sib := fmt.Sprintf("BACKUP DATABASE fkdb INTO LATEST IN %s WITH incremental_location = %s", dest, inc)
sqlDB.Exec(t, sib)
sir := fmt.Sprintf("RESTORE DATABASE fkdb FROM LATEST IN %s WITH new_db_name = 'inc_fkdb'", dest)
@@ -10033,10 +10037,10 @@ func TestBackupRestoreErrorsOnBothDefaultsPopulated(t *testing.T) {
fb := fmt.Sprintf("BACKUP DATABASE fkdb INTO %s", base)
sqlDB.Exec(t, fb)
- sibOld := fmt.Sprintf("BACKUP DATABASE fkdb INTO LATEST IN %s WITH incremental_location=%s", base, oldInc)
+ sibOld := fmt.Sprintf("BACKUP DATABASE fkdb INTO LATEST IN %s WITH incremental_location = %s", base, oldInc)
sqlDB.Exec(t, sibOld)
- sibNew := fmt.Sprintf("BACKUP DATABASE fkdb INTO LATEST IN %s WITH incremental_location=%s", base, newInc)
+ sibNew := fmt.Sprintf("BACKUP DATABASE fkdb INTO LATEST IN %s WITH incremental_location = %s", base, newInc)
sqlDB.Exec(t, sibNew)
irDefault := fmt.Sprintf("RESTORE DATABASE fkdb FROM LATEST IN %s WITH new_db_name = 'trad_fkdb'", base)
@@ -10108,7 +10112,7 @@ func TestBackupRestoreSeparateExplicitIsDefault(t *testing.T) {
sqlDB.Exec(t, `INSERT INTO fkdb.fk (ind) VALUES ($1)`, 200)
- sib := fmt.Sprintf("BACKUP DATABASE fkdb INTO LATEST IN %s WITH incremental_location=%s", dest, inc)
+ sib := fmt.Sprintf("BACKUP DATABASE fkdb INTO LATEST IN %s WITH incremental_location = %s", dest, inc)
sqlDB.Exec(t, sib)
{
// Locality Aware Show Backup validation
@@ -10138,7 +10142,7 @@ func TestBackupRestoreSeparateExplicitIsDefault(t *testing.T) {
sqlDB.Exec(t, fmt.Sprintf("SHOW BACKUP FROM LATEST IN %s WITH incremental_location= %s", dest, inc))
}
- sir := fmt.Sprintf("RESTORE DATABASE fkdb FROM LATEST IN %s WITH new_db_name = 'inc_fkdb', incremental_location=%s", dest, inc)
+ sir := fmt.Sprintf("RESTORE DATABASE fkdb FROM LATEST IN %s WITH new_db_name = 'inc_fkdb', incremental_location = %s", dest, inc)
sqlDB.Exec(t, sir)
ib := fmt.Sprintf("BACKUP DATABASE fkdb INTO LATEST IN %s", dest)
@@ -10575,10 +10579,10 @@ func TestViewRevisions(t *testing.T) {
USE test;
CREATE VIEW v AS SELECT 1;
`)
- sqlDB.Exec(t, `BACKUP INTO 'nodelocal://1/foo' WITH revision_history;`)
- sqlDB.Exec(t, `BACKUP INTO 'nodelocal://1/foo' WITH revision_history;`)
+ sqlDB.Exec(t, `BACKUP TO 'nodelocal://1/foo' WITH revision_history;`)
+ sqlDB.Exec(t, `BACKUP TO 'nodelocal://1/foo' WITH revision_history;`)
sqlDB.Exec(t, `ALTER VIEW v RENAME TO v2;`)
- sqlDB.Exec(t, `BACKUP INTO 'nodelocal://1/foo' WITH revision_history;`)
+ sqlDB.Exec(t, `BACKUP TO 'nodelocal://1/foo' WITH revision_history;`)
}
// This test checks that backups do not contain spans of views, but do contain
@@ -11044,7 +11048,7 @@ func TestBackupInLocality(t *testing.T) {
{node: 3, filter: "region=central,dc=1", err: "no instances found"},
} {
db := sqlutils.MakeSQLRunner(cluster.ServerConn(tc.node - 1))
- db.ExpectErr(t, tc.err, "BACKUP TABLE system.users INTO $1 WITH execution locality = $2", fmt.Sprintf("userfile:///tc%d", i), tc.filter)
+ db.ExpectErr(t, tc.err, "BACKUP system.users INTO $1 WITH execution locality = $2", fmt.Sprintf("userfile:///tc%d", i), tc.filter)
}
}
@@ -11091,7 +11095,7 @@ func TestExportResponseDataSizeZeroCPUPagination(t *testing.T) {
sqlDB.Exec(t, `INSERT INTO foo VALUES (1), (2)`)
sqlDB.Exec(t, `DELETE FROM foo WHERE a = 1`)
sqlDB.Exec(t, `BACKUP TABLE foo INTO 'nodelocal://1/foo'`)
- require.GreaterOrEqual(t, numRequests, 2)
+ require.Equal(t, 2, numRequests)
}
func TestBackupRestoreForeignKeys(t *testing.T) {
@@ -11171,7 +11175,7 @@ CREATE TABLE child_pk (k INT8 PRIMARY KEY REFERENCES parent);
// Test restoring different objects from the backup.
sqlDB.Exec(t, `CREATE DATABASE ts`)
- sqlDB.Exec(t, `RESTORE TABLE test.rev_times FROM LATEST IN $1 WITH into_db = 'ts'`, localFoo)
+ sqlDB.Exec(t, `RESTORE test.rev_times FROM LATEST IN $1 WITH into_db = 'ts'`, localFoo)
for _, ts := range sqlDB.QueryStr(t, `SELECT logical_time FROM ts.rev_times`) {
sqlDB.Exec(t, fmt.Sprintf(`RESTORE DATABASE test FROM LATEST IN $1 AS OF SYSTEM TIME %s`, ts[0]), localFoo)
// Just rendering the constraints loads and validates schema.
@@ -11180,11 +11184,11 @@ CREATE TABLE child_pk (k INT8 PRIMARY KEY REFERENCES parent);
// Restore a couple tables, including parent but not child_pk.
sqlDB.Exec(t, `CREATE DATABASE test`)
- sqlDB.Exec(t, fmt.Sprintf(`RESTORE TABLE test.circular FROM LATEST IN $1 AS OF SYSTEM TIME %s`, ts[0]), localFoo)
+ sqlDB.Exec(t, fmt.Sprintf(`RESTORE test.circular FROM LATEST IN $1 AS OF SYSTEM TIME %s`, ts[0]), localFoo)
require.Equal(t, [][]string{
{"test.public.circular", "CREATE TABLE public.circular (\n\tk INT8 NOT NULL,\n\tselfid INT8 NULL,\n\tCONSTRAINT circular_pkey PRIMARY KEY (k ASC),\n\tCONSTRAINT self_fk FOREIGN KEY (selfid) REFERENCES public.circular(selfid) NOT VALID,\n\tUNIQUE INDEX circular_selfid_key (selfid ASC)\n)"},
}, sqlDB.QueryStr(t, `SHOW CREATE TABLE test.circular`))
- sqlDB.Exec(t, fmt.Sprintf(`RESTORE TABLE test.parent, test.child FROM LATEST IN $1 AS OF SYSTEM TIME %s `, ts[0]), localFoo)
+ sqlDB.Exec(t, fmt.Sprintf(`RESTORE test.parent, test.child FROM LATEST IN $1 AS OF SYSTEM TIME %s `, ts[0]), localFoo)
sqlDB.Exec(t, `SELECT * FROM pg_catalog.pg_constraint`)
sqlDB.Exec(t, `DROP DATABASE test`)
@@ -11192,9 +11196,9 @@ CREATE TABLE child_pk (k INT8 PRIMARY KEY REFERENCES parent);
sqlDB.Exec(t, `CREATE DATABASE test`)
for _, name := range []string{"child_pk", "child", "circular", "parent"} {
if name == "child" || name == "child_pk" {
- sqlDB.ExpectErr(t, "cannot restore table.*without referenced table", fmt.Sprintf(`RESTORE TABLE test.%s FROM LATEST IN $1 AS OF SYSTEM TIME %s`, name, ts[0]), localFoo)
+ sqlDB.ExpectErr(t, "cannot restore table.*without referenced table", fmt.Sprintf(`RESTORE test.%s FROM LATEST IN $1 AS OF SYSTEM TIME %s`, name, ts[0]), localFoo)
}
- sqlDB.Exec(t, fmt.Sprintf(`RESTORE TABLE test.%s FROM LATEST IN $1 AS OF SYSTEM TIME %s WITH skip_missing_foreign_keys`, name, ts[0]), localFoo)
+ sqlDB.Exec(t, fmt.Sprintf(`RESTORE test.%s FROM LATEST IN $1 AS OF SYSTEM TIME %s WITH skip_missing_foreign_keys`, name, ts[0]), localFoo)
}
sqlDB.Exec(t, `SELECT * FROM pg_catalog.pg_constraint`)
sqlDB.Exec(t, `DROP DATABASE test`)
diff --git a/pkg/ccl/backupccl/backupinfo/BUILD.bazel b/pkg/ccl/backupccl/backupinfo/BUILD.bazel
index b133b8b67e99..d19f1ad4db17 100644
--- a/pkg/ccl/backupccl/backupinfo/BUILD.bazel
+++ b/pkg/ccl/backupccl/backupinfo/BUILD.bazel
@@ -84,7 +84,6 @@ go_test(
"//pkg/sql/catalog/descpb",
"//pkg/sql/isql",
"//pkg/testutils/serverutils",
- "//pkg/testutils/sqlutils",
"//pkg/testutils/testcluster",
"//pkg/util/bulk",
"//pkg/util/hlc",
diff --git a/pkg/ccl/backupccl/backupinfo/backup_metadata_test.go b/pkg/ccl/backupccl/backupinfo/backup_metadata_test.go
index 4d05bc93557a..ae36c5c2b0f6 100644
--- a/pkg/ccl/backupccl/backupinfo/backup_metadata_test.go
+++ b/pkg/ccl/backupccl/backupinfo/backup_metadata_test.go
@@ -7,7 +7,6 @@ package backupinfo_test
import (
"context"
- "fmt"
"sort"
"testing"
@@ -25,7 +24,6 @@ import (
"github.com/cockroachdb/cockroach/pkg/sql"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/isql"
- "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
"github.com/cockroachdb/cockroach/pkg/testutils/testcluster"
"github.com/cockroachdb/cockroach/pkg/util/bulk"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
@@ -56,9 +54,8 @@ func TestMetadataSST(t *testing.T) {
sqlDB.Exec(t, `SET CLUSTER SETTING kv.bulkio.write_metadata_sst.enabled = true`)
// Check that backup metadata is correct on full cluster backup.
- sqlDB.Exec(t, `BACKUP INTO $1`, userfile)
- backupPath := userfile + getBackupPath(t, sqlDB, userfile)
- checkMetadata(ctx, t, tc, backupPath)
+ sqlDB.Exec(t, `BACKUP TO $1`, userfile)
+ checkMetadata(ctx, t, tc, userfile)
// Check for correct backup metadata on incremental backup with revision
// history.
@@ -68,28 +65,25 @@ func TestMetadataSST(t *testing.T) {
sqlDB.Exec(t, `CREATE TABLE emptydb.bar(k INT, v INT)`)
sqlDB.Exec(t, `DROP DATABASE emptydb`)
- sqlDB.Exec(t, `BACKUP INTO $1 WITH revision_history`, userfile)
- checkMetadata(ctx, t, tc, backupPath)
+ sqlDB.Exec(t, `BACKUP TO $1 WITH revision_history`, userfile)
+ checkMetadata(ctx, t, tc, userfile)
// Check for correct backup metadata on single table backups.
userfile1 := "userfile:///1"
- sqlDB.Exec(t, `BACKUP TABLE data.bank INTO $1 WITH revision_history`, userfile1)
- backupPath1 := userfile1 + getBackupPath(t, sqlDB, userfile1)
- checkMetadata(ctx, t, tc, backupPath1)
+ sqlDB.Exec(t, `BACKUP TABLE data.bank TO $1 WITH revision_history`, userfile1)
+ checkMetadata(ctx, t, tc, userfile1)
// Check for correct backup metadata on tenant backups.
userfile2 := "userfile:///2"
_, err := tc.Servers[0].TenantController().StartTenant(ctx, base.TestTenantArgs{TenantID: roachpb.MustMakeTenantID(10)})
require.NoError(t, err)
- sqlDB.Exec(t, `BACKUP TENANT 10 INTO $1`, userfile2)
- backupPath2 := userfile2 + getBackupPath(t, sqlDB, userfile2)
- checkMetadata(ctx, t, tc, backupPath2)
+ sqlDB.Exec(t, `BACKUP TENANT 10 TO $1`, userfile2)
+ checkMetadata(ctx, t, tc, userfile2)
}
func checkMetadata(
ctx context.Context, t *testing.T, tc *testcluster.TestCluster, backupLoc string,
) {
- t.Helper()
store, err := cloud.ExternalStorageFromURI(
ctx,
backupLoc,
@@ -304,16 +298,6 @@ func checkStats(
require.Equal(t, expectedStats, metaStats)
}
-// Gets the first backup path in a userfile path.
-// Note: the tests in this file expects only one backup in the path so only fetches the first backup
-func getBackupPath(t *testing.T, db *sqlutils.SQLRunner, userfile string) string {
- rows := db.Query(t, "SHOW BACKUPS IN $1", userfile)
- var result struct{ path string }
- require.True(t, rows.Next(), fmt.Sprintf("Could not find backup path in %s", userfile))
- require.NoError(t, rows.Scan(&result.path))
- return result.path
-}
-
func testingReadBackupManifest(
ctx context.Context, store cloud.ExternalStorage, file string,
) (*backuppb.BackupManifest, error) {
diff --git a/pkg/ccl/backupccl/backupresolver/targets_test.go b/pkg/ccl/backupccl/backupresolver/targets_test.go
index 083aa709497a..69cb9fe7cc21 100644
--- a/pkg/ccl/backupccl/backupresolver/targets_test.go
+++ b/pkg/ccl/backupccl/backupresolver/targets_test.go
@@ -250,7 +250,7 @@ func TestDescriptorsMatchingTargets(t *testing.T) {
searchPath := sessiondata.MakeSearchPath([]string{"public", "pg_catalog"})
for i, test := range tests {
t.Run(fmt.Sprintf("%d/%s/%s", i, test.sessionDatabase, test.pattern), func(t *testing.T) {
- sql := fmt.Sprintf(`BACKUP %s INTO 'ignored'`, test.pattern)
+ sql := fmt.Sprintf(`BACKUP %s TO 'ignored'`, test.pattern)
stmt, err := parser.ParseOne(sql)
if err != nil {
t.Fatal(err)
diff --git a/pkg/ccl/backupccl/bench_covering_test.go b/pkg/ccl/backupccl/bench_covering_test.go
index c9ca1a064aa5..c99175b3c0d0 100644
--- a/pkg/ccl/backupccl/bench_covering_test.go
+++ b/pkg/ccl/backupccl/bench_covering_test.go
@@ -91,10 +91,11 @@ func BenchmarkRestoreEntryCover(b *testing.B) {
filter, err := makeSpanCoveringFilter(
backups[numBackups-1].Spans,
[]jobspb.RestoreProgress_FrontierEntry{},
+ nil,
introducedSpanFrontier,
0,
defaultMaxFileCount,
- )
+ false)
require.NoError(b, err)
defer filter.close()
diff --git a/pkg/ccl/backupccl/create_scheduled_backup_test.go b/pkg/ccl/backupccl/create_scheduled_backup_test.go
index bb9a1b5a1285..0a4bb17ad9d4 100644
--- a/pkg/ccl/backupccl/create_scheduled_backup_test.go
+++ b/pkg/ccl/backupccl/create_scheduled_backup_test.go
@@ -15,6 +15,7 @@ import (
"time"
"github.com/cockroachdb/cockroach/pkg/base"
+ "github.com/cockroachdb/cockroach/pkg/ccl/backupccl/backupdest"
"github.com/cockroachdb/cockroach/pkg/ccl/backupccl/backuppb"
"github.com/cockroachdb/cockroach/pkg/ccl/utilccl"
"github.com/cockroachdb/cockroach/pkg/jobs"
@@ -24,6 +25,7 @@ import (
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/scheduledjobs"
"github.com/cockroachdb/cockroach/pkg/scheduledjobs/schedulebase"
+ "github.com/cockroachdb/cockroach/pkg/security/username"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/sql"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descs"
@@ -33,6 +35,7 @@ import (
"github.com/cockroachdb/cockroach/pkg/testutils"
"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
+ "github.com/cockroachdb/cockroach/pkg/util/ioctx"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/log/eventpb"
@@ -959,10 +962,17 @@ INSERT INTO t1 values (-1), (10), (-100);
}
// Verify backup.
+ execCfg := th.server.ExecutorConfig().(sql.ExecutorConfig)
+ ctx := context.Background()
+ store, err := execCfg.DistSQLSrv.ExternalStorageFromURI(ctx, destination, username.RootUserName())
+ require.NoError(t, err)
+ r, err := backupdest.FindLatestFile(ctx, store)
+ require.NoError(t, err)
+ latest, err := ioctx.ReadAll(ctx, r)
+ require.NoError(t, err)
backedUp := th.sqlDB.QueryStr(t,
- `SELECT database_name, object_name FROM [SHOW BACKUP FROM LATEST IN $1] WHERE object_type='table' ORDER BY database_name, object_name`,
- destination,
- )
+ `SELECT database_name, object_name FROM [SHOW BACKUP $1] WHERE object_type='table' ORDER BY database_name, object_name`,
+ fmt.Sprintf("%s/%s", destination, string(latest)))
require.Equal(t, tc.verifyTables, backedUp)
})
}
diff --git a/pkg/ccl/backupccl/generative_split_and_scatter_processor.go b/pkg/ccl/backupccl/generative_split_and_scatter_processor.go
index c393c050d757..22ecb66d4ce5 100644
--- a/pkg/ccl/backupccl/generative_split_and_scatter_processor.go
+++ b/pkg/ccl/backupccl/generative_split_and_scatter_processor.go
@@ -477,9 +477,11 @@ func runGenerativeSplitAndScatter(
filter, err := makeSpanCoveringFilter(
spec.Spans,
spec.CheckpointedSpans,
+ spec.HighWater,
introducedSpanFrontier,
spec.TargetSize,
- spec.MaxFileCount)
+ spec.MaxFileCount,
+ spec.UseFrontierCheckpointing)
if err != nil {
return errors.Wrap(err, "failed to make span covering filter")
}
diff --git a/pkg/ccl/backupccl/generative_split_and_scatter_processor_test.go b/pkg/ccl/backupccl/generative_split_and_scatter_processor_test.go
index 21957e83d2e5..f751c52efdf2 100644
--- a/pkg/ccl/backupccl/generative_split_and_scatter_processor_test.go
+++ b/pkg/ccl/backupccl/generative_split_and_scatter_processor_test.go
@@ -267,6 +267,7 @@ func makeTestingGenerativeSplitAndScatterSpec(
EndTime: hlc.Timestamp{},
Spans: requiredSpans,
BackupLocalityInfo: nil,
+ HighWater: nil,
UserProto: "",
ChunkSize: 1,
TargetSize: 1,
diff --git a/pkg/ccl/backupccl/restore_job.go b/pkg/ccl/backupccl/restore_job.go
index c8efb577ee62..4fc48a8fd807 100644
--- a/pkg/ccl/backupccl/restore_job.go
+++ b/pkg/ccl/backupccl/restore_job.go
@@ -314,11 +314,15 @@ func restore(
return emptyRowCount, err
}
+ ver := job.Payload().CreationClusterVersion
+ // TODO(radu,msbutler,stevendanna): we might be able to remove this now?
+ on231 := ver.Major > 23 || (ver.Major == 23 && ver.Minor >= 1)
restoreCheckpoint := job.Progress().Details.(*jobspb.Progress_Restore).Restore.Checkpoint
requiredSpans := dataToRestore.getSpans()
progressTracker, err := makeProgressTracker(
requiredSpans,
restoreCheckpoint,
+ on231,
restoreCheckpointMaxBytes.Get(&execCtx.ExecCfg().Settings.SV),
endTime)
if err != nil {
@@ -349,9 +353,11 @@ func restore(
return makeSpanCoveringFilter(
requiredSpans,
restoreCheckpoint,
+ job.Progress().Details.(*jobspb.Progress_Restore).Restore.HighWater,
introducedSpanFrontier,
targetSize,
- maxFileCount)
+ maxFileCount,
+ progressTracker.useFrontier)
}(); err != nil {
return roachpb.RowCount{}, err
}
@@ -430,6 +436,13 @@ func restore(
}
tasks = append(tasks, jobProgressLoop)
}
+ if !progressTracker.useFrontier {
+ // This goroutine feeds the deprecated high water mark variant of the
+ // generativeCheckpointLoop.
+ tasks = append(tasks, func(ctx context.Context) error {
+ return genSpan(ctx, progressTracker.inFlightSpanFeeder)
+ })
+ }
progCh := make(chan *execinfrapb.RemoteProducerMetadata_BulkProcessorProgress)
if !details.ExperimentalOnline {
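For readers following the deprecated path being revived here: `progressTracker.inFlightSpanFeeder` is a buffered channel that a producer task (the `genSpan` call above) fills with import spans, while the checkpoint loop drains it lazily as progress updates arrive. A minimal, self-contained sketch of that producer/consumer shape, using a toy `span` type in place of `execinfrapb.RestoreSpanEntry`:

```go
package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

// span stands in for execinfrapb.RestoreSpanEntry in this sketch.
type span struct{ key, endKey string }

func main() {
	g, ctx := errgroup.WithContext(context.Background())

	// Buffered feeder, analogous to progressTracker.inFlightSpanFeeder.
	feeder := make(chan span, 1000)

	// Producer: regenerates import spans, closing the channel when done so
	// the consumer can tell no further spans are coming.
	g.Go(func() error {
		defer close(feeder)
		for _, s := range []span{{"a", "b"}, {"b", "c"}, {"c", "d"}} {
			select {
			case feeder <- s:
			case <-ctx.Done():
				return ctx.Err()
			}
		}
		return nil
	})

	// Consumer: drains spans on demand, as ingestUpdate does per progress update.
	g.Go(func() error {
		for s := range feeder {
			fmt.Printf("tracking in-flight span [%s,%s)\n", s.key, s.endKey)
		}
		return nil
	})

	if err := g.Wait(); err != nil {
		fmt.Println("error:", err)
	}
}
```

The closed channel is what the real `ingestUpdate` checks for (its `!ok` branch) to exit the checkpoint loop cleanly.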
diff --git a/pkg/ccl/backupccl/restore_mid_schema_change_test.go b/pkg/ccl/backupccl/restore_mid_schema_change_test.go
index b26216eb4ce3..5725d3857891 100644
--- a/pkg/ccl/backupccl/restore_mid_schema_change_test.go
+++ b/pkg/ccl/backupccl/restore_mid_schema_change_test.go
@@ -270,7 +270,7 @@ func restoreMidSchemaChange(
if isSchemaOnly {
restoreQuery = restoreQuery + ", schema_only"
}
- log.Infof(context.Background(), "%+v", sqlDB.QueryStr(t, "SHOW BACKUP FROM LATEST IN $1", localFoo))
+ log.Infof(context.Background(), "%+v", sqlDB.QueryStr(t, "SHOW BACKUP LATEST IN $1", localFoo))
sqlDB.Exec(t, restoreQuery, localFoo)
// Wait for all jobs to terminate. Some may fail since we don't restore
// adding spans.
diff --git a/pkg/ccl/backupccl/restore_old_versions_test.go b/pkg/ccl/backupccl/restore_old_versions_test.go
index d178edd15f58..0b0bb4c825fe 100644
--- a/pkg/ccl/backupccl/restore_old_versions_test.go
+++ b/pkg/ccl/backupccl/restore_old_versions_test.go
@@ -346,7 +346,7 @@ func fullClusterRestoreSystemRoleMembersWithoutIDs(exportDir string) func(t *tes
// manifest version is always less than the MinSupportedVersion which will
// in turn fail the restore unless we pass in the specified option to elide
// the compatibility check.
- sqlDB.Exec(t, fmt.Sprintf("RESTORE FROM '/' IN '%s' WITH UNSAFE_RESTORE_INCOMPATIBLE_VERSION", localFoo))
+ sqlDB.Exec(t, fmt.Sprintf("RESTORE FROM '%s' WITH UNSAFE_RESTORE_INCOMPATIBLE_VERSION", localFoo))
sqlDB.CheckQueryResults(t, "SELECT * FROM system.role_members", [][]string{
{"admin", "root", "true", "2", "1"},
@@ -381,7 +381,7 @@ func fullClusterRestoreSystemPrivilegesWithoutIDs(exportDir string) func(t *test
// manifest version is always less than the MinSupportedVersion which will
// in turn fail the restore unless we pass in the specified option to elide
// the compatibility check.
- sqlDB.Exec(t, fmt.Sprintf("RESTORE FROM '/' IN '%s' WITH UNSAFE_RESTORE_INCOMPATIBLE_VERSION", localFoo))
+ sqlDB.Exec(t, fmt.Sprintf("RESTORE FROM '%s' WITH UNSAFE_RESTORE_INCOMPATIBLE_VERSION", localFoo))
sqlDB.CheckQueryResults(t, "SELECT * FROM system.privileges", [][]string{
{"public", "/vtable/crdb_internal/tables", "{}", "{}", "4"},
@@ -416,7 +416,7 @@ func fullClusterRestoreSystemDatabaseRoleSettingsWithoutIDs(exportDir string) fu
// manifest version is always less than the MinSupportedVersion which will
// in turn fail the restore unless we pass in the specified option to elide
// the compatibility check.
- sqlDB.Exec(t, fmt.Sprintf("RESTORE FROM '/' IN '%s' WITH UNSAFE_RESTORE_INCOMPATIBLE_VERSION", localFoo))
+ sqlDB.Exec(t, fmt.Sprintf("RESTORE FROM '%s' WITH UNSAFE_RESTORE_INCOMPATIBLE_VERSION", localFoo))
sqlDB.CheckQueryResults(t, "SELECT * FROM system.database_role_settings", [][]string{
{"0", "", "{timezone=America/New_York}", "0"},
@@ -451,7 +451,7 @@ func fullClusterRestoreSystemExternalConnectionsWithoutIDs(exportDir string) fun
// manifest version is always less than the MinSupportedVersion which will
// in turn fail the restore unless we pass in the specified option to elide
// the compatibility check.
- sqlDB.Exec(t, fmt.Sprintf("RESTORE FROM '/' IN '%s' WITH UNSAFE_RESTORE_INCOMPATIBLE_VERSION", localFoo))
+ sqlDB.Exec(t, fmt.Sprintf("RESTORE FROM '%s' WITH UNSAFE_RESTORE_INCOMPATIBLE_VERSION", localFoo))
sqlDB.CheckQueryResults(t, "SELECT * FROM system.external_connections", [][]string{
{"connection1", "2023-03-20 01:26:50.174781 +0000 +0000", "2023-03-20 01:26:50.174781 +0000 +0000", "STORAGE",
diff --git a/pkg/ccl/backupccl/restore_online_test.go b/pkg/ccl/backupccl/restore_online_test.go
index 20f6b774e9e6..58a62f32d71e 100644
--- a/pkg/ccl/backupccl/restore_online_test.go
+++ b/pkg/ccl/backupccl/restore_online_test.go
@@ -101,11 +101,11 @@ func TestOnlineRestorePartitioned(t *testing.T) {
)
defer cleanupFn()
- sqlDB.Exec(t, `BACKUP DATABASE data INTO ('nodelocal://1/a?COCKROACH_LOCALITY=default',
+ sqlDB.Exec(t, `BACKUP DATABASE data TO ('nodelocal://1/a?COCKROACH_LOCALITY=default',
'nodelocal://1/b?COCKROACH_LOCALITY=dc%3Ddc2',
'nodelocal://1/c?COCKROACH_LOCALITY=dc%3Ddc3')`)
- j := sqlDB.QueryStr(t, `RESTORE DATABASE data FROM LATEST IN ('nodelocal://1/a?COCKROACH_LOCALITY=default',
+ j := sqlDB.QueryStr(t, `RESTORE DATABASE data FROM ('nodelocal://1/a?COCKROACH_LOCALITY=default',
'nodelocal://1/b?COCKROACH_LOCALITY=dc%3Ddc2',
'nodelocal://1/c?COCKROACH_LOCALITY=dc%3Ddc3') WITH new_db_name='d2', EXPERIMENTAL DEFERRED COPY`)
diff --git a/pkg/ccl/backupccl/restore_processor_planning.go b/pkg/ccl/backupccl/restore_processor_planning.go
index 69325f5d73bb..0c84e70db7e0 100644
--- a/pkg/ccl/backupccl/restore_processor_planning.go
+++ b/pkg/ccl/backupccl/restore_processor_planning.go
@@ -173,17 +173,21 @@ func distRestore(
EndTime: md.restoreTime,
Spans: md.dataToRestore.getSpans(),
BackupLocalityInfo: md.backupLocalityInfo,
+ HighWater: md.spanFilter.highWaterMark,
UserProto: execCtx.User().EncodeProto(),
TargetSize: md.spanFilter.targetSize,
MaxFileCount: int64(md.spanFilter.maxFileCount),
ChunkSize: int64(chunkSize),
NumEntries: int64(md.numImportSpans),
+ UseFrontierCheckpointing: md.spanFilter.useFrontierCheckpointing,
NumNodes: int64(numNodes),
JobID: int64(md.jobID),
SQLInstanceIDs: instanceIDs,
ExclusiveFileSpanComparison: md.exclusiveEndKeys,
}
- spec.CheckpointedSpans = persistFrontier(md.spanFilter.checkpointFrontier, 0)
+ if md.spanFilter.useFrontierCheckpointing {
+ spec.CheckpointedSpans = persistFrontier(md.spanFilter.checkpointFrontier, 0)
+ }
splitAndScatterProc := physicalplan.Processor{
SQLInstanceID: execCtx.ExecCfg().NodeInfo.NodeID.SQLInstanceID(),
diff --git a/pkg/ccl/backupccl/restore_progress.go b/pkg/ccl/backupccl/restore_progress.go
index a9bb9d42bb8c..df67d7c0819c 100644
--- a/pkg/ccl/backupccl/restore_progress.go
+++ b/pkg/ccl/backupccl/restore_progress.go
@@ -17,6 +17,7 @@ import (
"github.com/cockroachdb/cockroach/pkg/util/log"
spanUtils "github.com/cockroachdb/cockroach/pkg/util/span"
"github.com/cockroachdb/cockroach/pkg/util/syncutil"
+ "github.com/cockroachdb/errors"
pbtypes "github.com/gogo/protobuf/types"
)
@@ -49,7 +50,26 @@ type progressTracker struct {
// res tracks the amount of data that has been ingested.
res roachpb.RowCount
+
+ // Note that the fields below are used for the deprecated high watermark progress
+ // tracker.
+ // highWaterMark represents the index into the requestsCompleted map.
+ highWaterMark int64
+ ceiling int64
+
+ // As part of job progress tracking, inFlightImportSpans tracks all the
+	// spans that have been generated and are being processed by the processors
+	// in distRestore. requestsCompleted tracks the spans from
+	// inFlightImportSpans that have completed processing. Once all spans up
+ // to index N have been processed (and appear in requestsCompleted), then
+ // any spans with index < N will be removed from both inFlightImportSpans
+ // and requestsCompleted maps.
+ inFlightImportSpans map[int64]roachpb.Span
+ requestsCompleted map[int64]bool
}
+ useFrontier bool
+ inFlightSpanFeeder chan execinfrapb.RestoreSpanEntry
+
// endTime is the restore as of timestamp. This can be empty, and an empty timestamp
// indicates a restore of the latest revision.
endTime hlc.Timestamp
@@ -58,6 +78,7 @@ type progressTracker struct {
func makeProgressTracker(
requiredSpans roachpb.Spans,
persistedSpans []jobspb.RestoreProgress_FrontierEntry,
+ useFrontier bool,
maxBytes int64,
endTime hlc.Timestamp,
) (*progressTracker, error) {
@@ -66,20 +87,32 @@ func makeProgressTracker(
checkpointFrontier spanUtils.Frontier
err error
nextRequiredSpanKey map[string]roachpb.Key
+ inFlightSpanFeeder chan execinfrapb.RestoreSpanEntry
)
- checkpointFrontier, err = loadCheckpointFrontier(requiredSpans, persistedSpans)
- if err != nil {
- return nil, err
- }
- nextRequiredSpanKey = make(map[string]roachpb.Key)
- for i := 0; i < len(requiredSpans)-1; i++ {
- nextRequiredSpanKey[requiredSpans[i].EndKey.String()] = requiredSpans[i+1].Key
+ if useFrontier {
+ checkpointFrontier, err = loadCheckpointFrontier(requiredSpans, persistedSpans)
+ if err != nil {
+ return nil, err
+ }
+ nextRequiredSpanKey = make(map[string]roachpb.Key)
+ for i := 0; i < len(requiredSpans)-1; i++ {
+ nextRequiredSpanKey[requiredSpans[i].EndKey.String()] = requiredSpans[i+1].Key
+ }
+
+ } else {
+ inFlightSpanFeeder = make(chan execinfrapb.RestoreSpanEntry, 1000)
}
pt := &progressTracker{}
pt.mu.checkpointFrontier = checkpointFrontier
+ pt.mu.highWaterMark = -1
+ pt.mu.ceiling = 0
+ pt.mu.inFlightImportSpans = make(map[int64]roachpb.Span)
+ pt.mu.requestsCompleted = make(map[int64]bool)
pt.nextRequiredSpanKey = nextRequiredSpanKey
pt.maxBytes = maxBytes
+ pt.useFrontier = useFrontier
+ pt.inFlightSpanFeeder = inFlightSpanFeeder
pt.endTime = endTime
return pt, nil
}
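When frontier checkpointing is enabled, `makeProgressTracker` precomputes `nextRequiredSpanKey`, mapping each required span's end key to the next required span's start key. `ingestUpdate` (further down in this file) uses that map to extend a completed span across the gap to the next required span, so the persisted frontier entries stay adjacent and eventually merge. A small illustrative sketch, with string keys standing in for `roachpb.Key`:

```go
package main

import "fmt"

type span struct{ key, endKey string }

func main() {
	// Disjoint required spans, as in the restore: [a,d), [e,h), [j,m).
	requiredSpans := []span{{"a", "d"}, {"e", "h"}, {"j", "m"}}

	// Map each required span's end key to the next span's start key.
	nextRequiredSpanKey := make(map[string]string)
	for i := 0; i < len(requiredSpans)-1; i++ {
		nextRequiredSpanKey[requiredSpans[i].endKey] = requiredSpans[i+1].key
	}

	// A restore span entry [c,d) completes. Its end key matches required
	// span [a,d)'s end key, so forward the frontier update through the gap.
	update := span{"c", "d"}
	if next, ok := nextRequiredSpanKey[update.endKey]; ok {
		update.endKey = next
	}
	fmt.Printf("forwarding frontier over [%s,%s)\n", update.key, update.endKey) // [c,e)
}
```

Without this forwarding, the frontier would finish the restore with O(requiredSpans) disjoint entries rather than a single merged one.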
@@ -149,10 +182,16 @@ func (pt *progressTracker) updateJobCallback(
func() {
pt.mu.Lock()
defer pt.mu.Unlock()
- // TODO (msbutler): this requires iterating over every span in the frontier,
- // and rewriting every completed required span to disk.
- // We may want to be more intelligent about this.
- d.Restore.Checkpoint = persistFrontier(pt.mu.checkpointFrontier, pt.maxBytes)
+ if pt.useFrontier {
+ // TODO (msbutler): this requires iterating over every span in the frontier,
+ // and rewriting every completed required span to disk.
+ // We may want to be more intelligent about this.
+ d.Restore.Checkpoint = persistFrontier(pt.mu.checkpointFrontier, pt.maxBytes)
+ } else {
+ if pt.mu.highWaterMark >= 0 {
+ d.Restore.HighWater = pt.mu.inFlightImportSpans[pt.mu.highWaterMark].Key
+ }
+ }
}()
default:
log.Errorf(progressedCtx, "job payload had unexpected type %T", d)
@@ -185,48 +224,83 @@ func (pt *progressTracker) ingestUpdate(
}
pt.mu.res.Add(progDetails.Summary)
- updateSpan := progDetails.DataSpan.Clone()
- // If the completedSpan has the same end key as a requiredSpan_i, forward
- // the frontier for the span [completedSpan_startKey,
- // requiredSpan_i+1_startKey]. This trick ensures the span frontier will
- // contain a single entry when the restore completes. Recall that requiredSpans are
- // disjoint, and a spanFrontier never merges disjoint spans. So, without
- // this trick, the spanFrontier will have O(requiredSpans) entries when the
- // restore completes. This trick ensures all spans persisted to the frontier are adjacent,
- // and consequently, will eventually merge.
- //
- // Here's a visual example:
- // - this restore has two required spans: [a,d) and [e,h).
- // - the restore span entry [c,d) just completed, implying the frontier logically looks like:
- //
- // tC| x---o
- // t0|
- // keys--a---b---c---d---e---f---g---h->
- //
- // r-spans: |---span1---| |---span2---|
- //
- // - since [c,d)'s endkey equals the required span (a,d]'s endkey,
- // also update the gap between required span 1 and 2 in the frontier:
- //
- // tC| x-------o
- // t0|
- // keys--a---b---c---d---e---f---g---h->
- //
- // r-spans: |---span1---| |---span2---|
- //
- // - this will ensure that when all subspans in required spans 1 and 2 complete,
- // the checkpoint frontier has one span:
- //
- // tC| x---------------------------o
- // t0|
- // keys--a---b---c---d---e---f---g---h->
- //
- // r-spans: |---span1---| |---span2---|
- if newEndKey, ok := pt.nextRequiredSpanKey[updateSpan.EndKey.String()]; ok {
- updateSpan.EndKey = newEndKey
- }
- if _, err := pt.mu.checkpointFrontier.Forward(updateSpan, completedSpanTime); err != nil {
- return false, err
+ if pt.useFrontier {
+ updateSpan := progDetails.DataSpan.Clone()
+ // If the completedSpan has the same end key as a requiredSpan_i, forward
+ // the frontier for the span [completedSpan_startKey,
+ // requiredSpan_i+1_startKey]. This trick ensures the span frontier will
+ // contain a single entry when the restore completes. Recall that requiredSpans are
+ // disjoint, and a spanFrontier never merges disjoint spans. So, without
+ // this trick, the spanFrontier will have O(requiredSpans) entries when the
+ // restore completes. This trick ensures all spans persisted to the frontier are adjacent,
+ // and consequently, will eventually merge.
+ //
+ // Here's a visual example:
+ // - this restore has two required spans: [a,d) and [e,h).
+ // - the restore span entry [c,d) just completed, implying the frontier logically looks like:
+ //
+ // tC| x---o
+ // t0|
+ // keys--a---b---c---d---e---f---g---h->
+ //
+ // r-spans: |---span1---| |---span2---|
+ //
+ // - since [c,d)'s endkey equals the required span (a,d]'s endkey,
+ // also update the gap between required span 1 and 2 in the frontier:
+ //
+ // tC| x-------o
+ // t0|
+ // keys--a---b---c---d---e---f---g---h->
+ //
+ // r-spans: |---span1---| |---span2---|
+ //
+ // - this will ensure that when all subspans in required spans 1 and 2 complete,
+ // the checkpoint frontier has one span:
+ //
+ // tC| x---------------------------o
+ // t0|
+ // keys--a---b---c---d---e---f---g---h->
+ //
+ // r-spans: |---span1---| |---span2---|
+ if newEndKey, ok := pt.nextRequiredSpanKey[updateSpan.EndKey.String()]; ok {
+ updateSpan.EndKey = newEndKey
+ }
+ if _, err := pt.mu.checkpointFrontier.Forward(updateSpan, completedSpanTime); err != nil {
+ return false, err
+ }
+ } else {
+ idx := progDetails.ProgressIdx
+
+ if idx >= pt.mu.ceiling {
+ for i := pt.mu.ceiling; i <= idx; i++ {
+ importSpan, ok := <-pt.inFlightSpanFeeder
+ if !ok {
+ // The channel has been closed, there is nothing left to do.
+ log.Infof(ctx, "exiting restore checkpoint loop as the import span channel has been closed")
+ return true, nil
+ }
+ pt.mu.inFlightImportSpans[i] = importSpan.Span
+ }
+ pt.mu.ceiling = idx + 1
+ }
+
+ if sp, ok := pt.mu.inFlightImportSpans[idx]; ok {
+ // Assert that we're actually marking the correct span done. See #23977.
+ if !sp.Key.Equal(progDetails.DataSpan.Key) {
+ return false, errors.Newf("request %d for span %v does not match import span for same idx: %v",
+ idx, progDetails.DataSpan, sp,
+ )
+ }
+ pt.mu.requestsCompleted[idx] = true
+ prevHighWater := pt.mu.highWaterMark
+ for j := pt.mu.highWaterMark + 1; j < pt.mu.ceiling && pt.mu.requestsCompleted[j]; j++ {
+ pt.mu.highWaterMark = j
+ }
+ for j := prevHighWater; j < pt.mu.highWaterMark; j++ {
+ delete(pt.mu.requestsCompleted, j)
+ delete(pt.mu.inFlightImportSpans, j)
+ }
+ }
}
return true, nil
}
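The legacy branch above advances a high-water mark over the longest prefix of completed request indexes, then prunes the bookkeeping maps behind the new mark. A stripped-down sketch of that advance-and-prune loop (indexes are simulated and locking is omitted):

```go
package main

import "fmt"

func main() {
	requestsCompleted := map[int64]bool{}
	highWaterMark := int64(-1)
	ceiling := int64(6) // indexes 0..5 have been handed out

	complete := func(idx int64) {
		requestsCompleted[idx] = true
		prev := highWaterMark
		// Advance over the longest fully completed prefix.
		for j := highWaterMark + 1; j < ceiling && requestsCompleted[j]; j++ {
			highWaterMark = j
		}
		// Prune indexes strictly below the new mark.
		for j := prev; j < highWaterMark; j++ {
			delete(requestsCompleted, j)
		}
		fmt.Printf("completed %d -> high water %d\n", idx, highWaterMark)
	}

	complete(1) // out of order: mark stays at -1
	complete(0) // prefix 0..1 now done: mark jumps to 1
	complete(2) // mark advances to 2
}
```

The persisted job progress is then just the start key of the span at the mark, which is what `updateJobCallback` writes to `d.Restore.HighWater`.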
diff --git a/pkg/ccl/backupccl/restore_progress_test.go b/pkg/ccl/backupccl/restore_progress_test.go
index 2e8ebdd92dba..7bb1fcacf731 100644
--- a/pkg/ccl/backupccl/restore_progress_test.go
+++ b/pkg/ccl/backupccl/restore_progress_test.go
@@ -82,7 +82,7 @@ func TestProgressTracker(t *testing.T) {
},
} {
restoreTime := hlc.Timestamp{}
- pt, err := makeProgressTracker(requiredSpans, persistedSpans, 0, restoreTime)
+ pt, err := makeProgressTracker(requiredSpans, persistedSpans, true, 0, restoreTime)
require.NoError(t, err, "step %d", i)
done, err := pt.ingestUpdate(ctx, mockUpdate(step.update, step.completeUpTo))
diff --git a/pkg/ccl/backupccl/restore_span_covering.go b/pkg/ccl/backupccl/restore_span_covering.go
index 8bdde3b44754..a1a53689ebac 100644
--- a/pkg/ccl/backupccl/restore_span_covering.go
+++ b/pkg/ccl/backupccl/restore_span_covering.go
@@ -140,18 +140,22 @@ func createIntroducedSpanFrontier(
// spanCoveringFilter holds metadata that filters which backups and required spans are used to
// populate a restoreSpanEntry
type spanCoveringFilter struct {
- checkpointFrontier spanUtils.Frontier
- introducedSpanFrontier spanUtils.Frontier
- targetSize int64
- maxFileCount int
+ checkpointFrontier spanUtils.Frontier
+ highWaterMark roachpb.Key
+ introducedSpanFrontier spanUtils.Frontier
+ useFrontierCheckpointing bool
+ targetSize int64
+ maxFileCount int
}
func makeSpanCoveringFilter(
requiredSpans roachpb.Spans,
checkpointedSpans []jobspb.RestoreProgress_FrontierEntry,
+ highWater roachpb.Key,
introducedSpanFrontier spanUtils.Frontier,
targetSize int64,
maxFileCount int64,
+ useFrontierCheckpointing bool,
) (spanCoveringFilter, error) {
f, err := loadCheckpointFrontier(requiredSpans, checkpointedSpans)
if err != nil {
@@ -165,10 +169,12 @@ func makeSpanCoveringFilter(
maxFileCount = defaultMaxFileCount
}
sh := spanCoveringFilter{
- introducedSpanFrontier: introducedSpanFrontier,
- targetSize: targetSize,
- maxFileCount: int(maxFileCount),
- checkpointFrontier: f,
+ introducedSpanFrontier: introducedSpanFrontier,
+ targetSize: targetSize,
+ maxFileCount: int(maxFileCount),
+ highWaterMark: highWater,
+ useFrontierCheckpointing: useFrontierCheckpointing,
+ checkpointFrontier: f,
}
return sh, nil
}
@@ -176,7 +182,16 @@ func makeSpanCoveringFilter(
// filterCompleted returns the subspans of the requiredSpan that still need to be
// restored.
func (f spanCoveringFilter) filterCompleted(requiredSpan roachpb.Span) roachpb.Spans {
- return f.findToDoSpans(requiredSpan)
+ if f.useFrontierCheckpointing {
+ return f.findToDoSpans(requiredSpan)
+ }
+ if requiredSpan.EndKey.Compare(f.highWaterMark) <= 0 {
+ return roachpb.Spans{}
+ }
+ if requiredSpan.Key.Compare(f.highWaterMark) < 0 {
+ requiredSpan.Key = f.highWaterMark
+ }
+ return []roachpb.Span{requiredSpan}
}
// findToDoSpans returns the sub spans within the required span that have not completed.
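In the legacy mode, `filterCompleted` reduces to two key comparisons against the high-water mark: a required span ending at or below the mark is already restored, and one straddling it is clipped to start at the mark. A toy sketch with string keys in place of `roachpb.Key`:

```go
package main

import (
	"fmt"
	"strings"
)

type span struct{ key, endKey string }

// filterCompleted mirrors the high-water branch above: drop spans that end
// at or below the mark, clip spans that straddle it.
func filterCompleted(required span, highWater string) []span {
	if strings.Compare(required.endKey, highWater) <= 0 {
		return nil // entirely below the mark: already restored
	}
	if strings.Compare(required.key, highWater) < 0 {
		required.key = highWater // clip off the completed prefix
	}
	return []span{required}
}

func main() {
	fmt.Println(filterCompleted(span{"a", "c"}, "d")) // []
	fmt.Println(filterCompleted(span{"c", "f"}, "d")) // [{d f}]
	fmt.Println(filterCompleted(span{"e", "h"}, "d")) // [{e h}]
}
```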
diff --git a/pkg/ccl/backupccl/restore_span_covering_test.go b/pkg/ccl/backupccl/restore_span_covering_test.go
index 9f26892d3a94..ae80f63bea49 100644
--- a/pkg/ccl/backupccl/restore_span_covering_test.go
+++ b/pkg/ccl/backupccl/restore_span_covering_test.go
@@ -268,6 +268,7 @@ func makeImportSpans(
spans []roachpb.Span,
backups []backuppb.BackupManifest,
layerToIterFactory backupinfo.LayerToBackupManifestFileIterFactory,
+ highWaterMark []byte,
targetSize int64,
introducedSpanFrontier spanUtils.Frontier,
completedSpans []jobspb.RestoreProgress_FrontierEntry,
@@ -285,10 +286,11 @@ func makeImportSpans(
filter, err := makeSpanCoveringFilter(
spans,
completedSpans,
+ highWaterMark,
introducedSpanFrontier,
targetSize,
defaultMaxFileCount,
- )
+ highWaterMark == nil)
if err != nil {
return nil, err
}
@@ -415,6 +417,7 @@ func TestRestoreEntryCoverExample(t *testing.T) {
spans,
backups,
layerToIterFactory,
+ nil,
noSpanTargetSize,
emptySpanFrontier,
emptyCompletedSpans)
@@ -436,6 +439,7 @@ func TestRestoreEntryCoverExample(t *testing.T) {
spans,
backups,
layerToIterFactory,
+ nil,
2<<20,
emptySpanFrontier,
emptyCompletedSpans)
@@ -459,6 +463,7 @@ func TestRestoreEntryCoverExample(t *testing.T) {
spans,
backups,
layerToIterFactory,
+ nil,
noSpanTargetSize,
introducedSpanFrontier,
emptyCompletedSpans)
@@ -487,6 +492,7 @@ func TestRestoreEntryCoverExample(t *testing.T) {
spans,
backups,
layerToIterFactory,
+ nil,
noSpanTargetSize,
emptySpanFrontier,
persistFrontier(frontier, 0))
@@ -514,6 +520,7 @@ func TestRestoreEntryCoverExample(t *testing.T) {
spans,
backups,
layerToIterFactory,
+ nil,
noSpanTargetSize,
emptySpanFrontier,
emptyCompletedSpans)
@@ -682,9 +689,10 @@ func TestCheckpointFilter(t *testing.T) {
[]roachpb.Span{requiredSpan},
checkpointedSpans,
nil,
+ nil,
0,
defaultMaxFileCount,
- )
+ true)
require.NoError(t, err)
defer f.close()
require.Equal(t, tc.expectedToDoSpans, f.filterCompleted(requiredSpan))
@@ -824,6 +832,7 @@ func runTestRestoreEntryCoverForSpanAndFileCounts(
backups[numBackups-1].Spans,
backups,
layerToIterFactory,
+ nil,
target<<20,
introducedSpanFrontier,
[]jobspb.RestoreProgress_FrontierEntry{})
@@ -836,23 +845,32 @@ func runTestRestoreEntryCoverForSpanAndFileCounts(
if len(cover) > 0 {
for n := 1; n <= 5; n++ {
var completedSpans []roachpb.Span
+ var highWater []byte
var frontierEntries []jobspb.RestoreProgress_FrontierEntry
// Randomly choose to use frontier checkpointing instead of
// explicitly testing both forms to avoid creating an exponential
// number of tests.
- completedSpans = getRandomCompletedSpans(cover, n)
- for _, sp := range completedSpans {
- frontierEntries = append(frontierEntries, jobspb.RestoreProgress_FrontierEntry{
- Span: sp,
- Timestamp: completedSpanTime,
- })
+ useFrontierCheckpointing := rand.Intn(2) == 0
+ if useFrontierCheckpointing {
+ completedSpans = getRandomCompletedSpans(cover, n)
+ for _, sp := range completedSpans {
+ frontierEntries = append(frontierEntries, jobspb.RestoreProgress_FrontierEntry{
+ Span: sp,
+ Timestamp: completedSpanTime,
+ })
+ }
+ } else {
+ idx := r.Intn(len(cover))
+ highWater = cover[idx].Span.EndKey
}
+
resumeCover, err := makeImportSpans(
ctx,
backups[numBackups-1].Spans,
backups,
layerToIterFactory,
+ highWater,
target<<20,
introducedSpanFrontier,
frontierEntries)
@@ -862,11 +880,21 @@ func runTestRestoreEntryCoverForSpanAndFileCounts(
// completed spans from the original required spans.
var resumedRequiredSpans roachpb.Spans
for _, origReq := range backups[numBackups-1].Spans {
- resumeReq := roachpb.SubtractSpans([]roachpb.Span{origReq}, completedSpans)
+ var resumeReq []roachpb.Span
+ if useFrontierCheckpointing {
+ resumeReq = roachpb.SubtractSpans([]roachpb.Span{origReq}, completedSpans)
+ } else {
+ resumeReq = roachpb.SubtractSpans([]roachpb.Span{origReq}, []roachpb.Span{{Key: cover[0].Span.Key, EndKey: highWater}})
+ }
resumedRequiredSpans = append(resumedRequiredSpans, resumeReq...)
}
- errorMsg := fmt.Sprintf("completed spans in frontier: %v", completedSpans)
+ var errorMsg string
+ if useFrontierCheckpointing {
+ errorMsg = fmt.Sprintf("completed spans in frontier: %v", completedSpans)
+ } else {
+ errorMsg = fmt.Sprintf("highwater: %v", highWater)
+ }
require.NoError(t, checkRestoreCovering(ctx, backups, resumedRequiredSpans,
resumeCover, target != noSpanTargetSize, execCfg.DistSQLSrv.ExternalStorage),
@@ -1005,7 +1033,7 @@ func TestRestoreEntryCoverZeroSizeFiles(t *testing.T) {
expectedCover = tt.expectedCoverGenerated
}
- cover, err := makeImportSpans(ctx, tt.requiredSpans, backups, layerToIterFactory, noSpanTargetSize, emptySpanFrontier, emptyCompletedSpans)
+ cover, err := makeImportSpans(ctx, tt.requiredSpans, backups, layerToIterFactory, nil, noSpanTargetSize, emptySpanFrontier, emptyCompletedSpans)
require.NoError(t, err)
simpleCover := make([]simpleRestoreSpanEntry, len(cover))
diff --git a/pkg/ccl/backupccl/testdata/backup-restore/backup-permissions-deprecated b/pkg/ccl/backupccl/testdata/backup-restore/backup-permissions-deprecated
index ba45ab15f56b..106183ef40a1 100644
--- a/pkg/ccl/backupccl/testdata/backup-restore/backup-permissions-deprecated
+++ b/pkg/ccl/backupccl/testdata/backup-restore/backup-permissions-deprecated
@@ -143,7 +143,7 @@ GRANT CONNECT ON DATABASE d TO testuser;
----
exec-sql cluster=s3 user=testuser
-SHOW BACKUP FROM LATEST IN 'http://COCKROACH_TEST_HTTP_SERVER/'
+SHOW BACKUP 'http://COCKROACH_TEST_HTTP_SERVER/'
----
pq: only users with the admin role or the EXTERNALIOIMPLICITACCESS system privilege are allowed to access the specified http URI
@@ -161,6 +161,6 @@ CREATE USER testuser
----
exec-sql cluster=s4 user=testuser
-SHOW BACKUP FROM LATEST IN 'http://COCKROACH_TEST_HTTP_SERVER/'
+SHOW BACKUP 'http://COCKROACH_TEST_HTTP_SERVER/'
----
-pq: read LATEST path: external http access disabled
+pq: make storage: external http access disabled
diff --git a/pkg/ccl/backupccl/utils_test.go b/pkg/ccl/backupccl/utils_test.go
index 20c2536add26..1bbb2a4f0d98 100644
--- a/pkg/ccl/backupccl/utils_test.go
+++ b/pkg/ccl/backupccl/utils_test.go
@@ -577,16 +577,3 @@ func requireRecoveryEvent(
return nil
})
}
-
-// getFullBackupPaths finds all full backups in the given URI and returns their paths using SHOW BACKUPS IN
-func getFullBackupPaths(t *testing.T, sqlDB *sqlutils.SQLRunner, uri string) []string {
- t.Helper()
- var fullBackupPaths []string
- rows := sqlDB.Query(t, `SELECT path FROM [SHOW BACKUPS IN $1]`, uri)
- for rows.Next() {
- var path string
- require.NoError(t, rows.Scan(&path))
- fullBackupPaths = append(fullBackupPaths, path)
- }
- return fullBackupPaths
-}
diff --git a/pkg/ccl/changefeedccl/BUILD.bazel b/pkg/ccl/changefeedccl/BUILD.bazel
index 98367b131975..2f01b95f9eec 100644
--- a/pkg/ccl/changefeedccl/BUILD.bazel
+++ b/pkg/ccl/changefeedccl/BUILD.bazel
@@ -319,8 +319,10 @@ go_test(
"//pkg/sql/sessiondatapb",
"//pkg/sql/types",
"//pkg/storage/enginepb",
+ "//pkg/storage/fs",
"//pkg/testutils",
"//pkg/testutils/jobutils",
+ "//pkg/testutils/listenerutil",
"//pkg/testutils/serverutils",
"//pkg/testutils/skip",
"//pkg/testutils/sqlutils",
diff --git a/pkg/ccl/changefeedccl/alter_changefeed_test.go b/pkg/ccl/changefeedccl/alter_changefeed_test.go
index 3e33dfbf7567..e1243c9db9eb 100644
--- a/pkg/ccl/changefeedccl/alter_changefeed_test.go
+++ b/pkg/ccl/changefeedccl/alter_changefeed_test.go
@@ -278,8 +278,6 @@ func TestAlterChangefeedSwitchFamily(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
- require.NoError(t, log.SetVModule("helpers_test=1"))
-
testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) {
sqlDB := sqlutils.MakeSQLRunner(s.DB)
sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING, FAMILY onlya (a), FAMILY onlyb (b))`)
diff --git a/pkg/ccl/changefeedccl/cdcevent/event_test.go b/pkg/ccl/changefeedccl/cdcevent/event_test.go
index 3f45e960d7c3..0aafc0c957b8 100644
--- a/pkg/ccl/changefeedccl/cdcevent/event_test.go
+++ b/pkg/ccl/changefeedccl/cdcevent/event_test.go
@@ -598,9 +598,6 @@ func TestEventColumnOrderingWithSchemaChanges(t *testing.T) {
sqlDB := sqlutils.MakeSQLRunner(db)
// Use alter column type to force column reordering.
sqlDB.Exec(t, `SET enable_experimental_alter_column_type_general = true`)
- // TODO(#133040): force the legacy schema changer. When run with the DSC,
- // the ordering changes in the column family. This needs to be revisited in 133040.
- sqlDB.Exec(t, `SET use_declarative_schema_changer = 'off'`)
type decodeExpectation struct {
expectUnwatchedErr bool
diff --git a/pkg/ccl/changefeedccl/changefeed_test.go b/pkg/ccl/changefeedccl/changefeed_test.go
index 72487f214956..689ff0ba9c51 100644
--- a/pkg/ccl/changefeedccl/changefeed_test.go
+++ b/pkg/ccl/changefeedccl/changefeed_test.go
@@ -68,8 +68,10 @@ import (
"github.com/cockroachdb/cockroach/pkg/sql/randgen"
"github.com/cockroachdb/cockroach/pkg/sql/sem/eval"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
+ "github.com/cockroachdb/cockroach/pkg/storage/fs"
"github.com/cockroachdb/cockroach/pkg/testutils"
"github.com/cockroachdb/cockroach/pkg/testutils/jobutils"
+ "github.com/cockroachdb/cockroach/pkg/testutils/listenerutil"
"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
"github.com/cockroachdb/cockroach/pkg/testutils/skip"
"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
@@ -2352,7 +2354,7 @@ func TestChangefeedSchemaChangeBackfillCheckpoint(t *testing.T) {
// Checkpoint progress frequently, and set the checkpoint size limit.
changefeedbase.FrontierCheckpointFrequency.Override(
- context.Background(), &s.Server.ClusterSettings().SV, 10*time.Millisecond)
+ context.Background(), &s.Server.ClusterSettings().SV, 1)
changefeedbase.FrontierCheckpointMaxBytes.Override(
context.Background(), &s.Server.ClusterSettings().SV, maxCheckpointSize)
@@ -6461,6 +6463,117 @@ func TestChangefeedHandlesRollingRestart(t *testing.T) {
}
}
+// TestChangefeedTimelyResolvedTimestampUpdatePostRollingRestart verifies that
+// a changefeed over a large number of quiesced ranges is able to quickly
+// advance its resolved timestamp after a rolling restart. At the lowest level,
+// the test ensures that lease acquisitions required to advance the closed
+// timestamp of the constituent changefeed ranges are fast.
+func TestChangefeedTimelyResolvedTimestampUpdatePostRollingRestart(t *testing.T) {
+ defer leaktest.AfterTest(t)()
+ defer log.Scope(t).Close(t)
+
+ // This test requires many range splits, which can be slow under certain test
+ // conditions. Skip potentially slow tests.
+ skip.UnderDeadlock(t)
+ skip.UnderRace(t)
+
+ opts := makeOptions()
+ defer addCloudStorageOptions(t, &opts)()
+ opts.forceRootUserConnection = true
+ defer changefeedbase.TestingSetDefaultMinCheckpointFrequency(testSinkFlushFrequency)()
+ defer testingUseFastRetry()()
+ const numNodes = 3
+
+ stickyVFSRegistry := fs.NewStickyRegistry()
+ listenerReg := listenerutil.NewListenerRegistry()
+ defer listenerReg.Close()
+
+ perServerKnobs := make(map[int]base.TestServerArgs, numNodes)
+ for i := 0; i < numNodes; i++ {
+ perServerKnobs[i] = base.TestServerArgs{
+ Knobs: base.TestingKnobs{
+ DistSQL: &execinfra.TestingKnobs{
+ Changefeed: &TestingKnobs{},
+ },
+ JobsTestingKnobs: jobs.NewTestingKnobsWithShortIntervals(),
+ Server: &server.TestingKnobs{
+ StickyVFSRegistry: stickyVFSRegistry,
+ },
+ },
+ ExternalIODir: opts.externalIODir,
+ UseDatabase: "d",
+ }
+ }
+
+ tc := serverutils.StartCluster(t, numNodes,
+ base.TestClusterArgs{
+ ServerArgsPerNode: perServerKnobs,
+ ServerArgs: base.TestServerArgs{
+ // Test uses SPLIT AT, which isn't currently supported for
+ // secondary tenants. Tracked with #76378.
+ DefaultTestTenant: base.TODOTestTenantDisabled,
+ },
+ ReusableListenerReg: listenerReg,
+ })
+ defer tc.Stopper().Stop(context.Background())
+
+ db := tc.ServerConn(1)
+ sqlDB := sqlutils.MakeSQLRunner(db)
+ serverutils.SetClusterSetting(t, tc, "kv.rangefeed.enabled", true)
+
+ // Create a table with 1000 ranges.
+ sqlDB.ExecMultiple(t,
+ `CREATE DATABASE d;`,
+ `CREATE TABLE d.foo (k INT PRIMARY KEY);`,
+ `INSERT INTO d.foo (k) SELECT * FROM generate_series(1, 1000);`,
+ `ALTER TABLE d.foo SPLIT AT (SELECT * FROM generate_series(1, 1000));`,
+ )
+
+ // Wait for ranges to quiesce.
+ testutils.SucceedsSoon(t, func() error {
+ for i := range tc.NumServers() {
+ store, err := tc.Server(i).GetStores().(*kvserver.Stores).GetStore(tc.Server(i).GetFirstStoreID())
+ require.NoError(t, err)
+ numQuiescent := store.Metrics().QuiescentCount.Value()
+ numQualifyForQuiescence := store.Metrics().LeaseEpochCount.Value()
+ if numQuiescent < numQualifyForQuiescence {
+ return errors.Newf(
+ "waiting for ranges to quiesce on node %d; quiescent: %d; should quiesce: %d",
+ tc.Server(i).NodeID(), numQuiescent, numQualifyForQuiescence,
+ )
+ }
+ }
+ return nil
+ })
+
+ // Capture the pre-restart timestamp. We'll use this as the start time for the
+ // changefeed later.
+ var tsLogical string
+ sqlDB.QueryRow(t, `SELECT cluster_logical_timestamp()`).Scan(&tsLogical)
+
+ // Perform the rolling restart.
+ require.NoError(t, tc.Restart())
+
+ // For validation, the test requires an enterprise feed.
+ feedTestEnterpriseSinks(&opts)
+ sinkType := randomSinkTypeWithOptions(opts)
+ f, closeSink := makeFeedFactoryWithOptions(t, sinkType, tc, tc.ServerConn(0), opts)
+ defer closeSink()
+ // The end time is captured post restart. The changefeed spans from before the
+ // restart to after.
+ endTime := tc.Server(0).Clock().Now().AddDuration(5 * time.Second)
+ testFeed := feed(t, f, `CREATE CHANGEFEED FOR d.foo WITH cursor=$1, end_time=$2`,
+ tsLogical, eval.TimestampToDecimalDatum(endTime).String())
+ defer closeFeed(t, testFeed)
+
+ defer DiscardMessages(testFeed)()
+
+ // Ensure the changefeed is able to complete in a reasonable amount of time.
+ require.NoError(t, testFeed.(cdctest.EnterpriseTestFeed).WaitForStatus(func(s jobs.Status) bool {
+ return s == jobs.StatusSucceeded
+ }))
+}
+
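The quiesce wait in the new test leans on `testutils.SucceedsSoon`, which retries a condition until it returns nil or a deadline lapses. A rough stand-in showing just the retry shape (the deadline, sleep interval, and metric values here are simulated, not the real helper's):

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// succeedsSoon retries cond until it returns nil or the deadline passes.
func succeedsSoon(deadline time.Duration, cond func() error) error {
	start := time.Now()
	for {
		if err := cond(); err == nil {
			return nil
		} else if time.Since(start) > deadline {
			return fmt.Errorf("condition never held: %w", err)
		}
		time.Sleep(10 * time.Millisecond)
	}
}

func main() {
	quiescent, eligible := 990, 1000
	err := succeedsSoon(time.Second, func() error {
		quiescent += 5 // simulate ranges quiescing over time
		if quiescent < eligible {
			return errors.New("still waiting for ranges to quiesce")
		}
		return nil
	})
	fmt.Println("done, err =", err)
}
```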
func TestChangefeedPropagatesTerminalError(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
@@ -6646,6 +6759,8 @@ INSERT INTO foo VALUES (1, 'f');
assertPayloads(t, foo, []string{
`foo: [6, "a"]->{"after": {"a": 6, "b": "a"}}`,
`foo: [14, "e"]->{"after": {"a": 5, "b": "e"}}`,
+ })
+ assertPayloads(t, foo, []string{
`foo: [1]->{"after": {"a": 1, "b": "f"}}`,
})
}
diff --git a/pkg/ccl/crosscluster/logical/dead_letter_queue.go b/pkg/ccl/crosscluster/logical/dead_letter_queue.go
index 2d2a6a3782b6..59a3807fda12 100644
--- a/pkg/ccl/crosscluster/logical/dead_letter_queue.go
+++ b/pkg/ccl/crosscluster/logical/dead_letter_queue.go
@@ -22,10 +22,7 @@ import (
const (
dlqSchemaName = "crdb_replication"
// dlqBaseTableName is defined as: "<dbName>.<dlqSchemaName>.dlq_<jobID>_<schemaName>_<tableName>"
- dlqBaseTableName = "%s.%s.%s"
- createEnumBaseStmt = `CREATE TYPE IF NOT EXISTS %s.%s.mutation_type AS ENUM (
- 'insert', 'update', 'delete'
- )`
+ dlqBaseTableName = "%s.%s.%s"
createSchemaBaseStmt = `CREATE SCHEMA IF NOT EXISTS %s.%s`
createTableBaseStmt = `CREATE TABLE IF NOT EXISTS %s (
id INT8 DEFAULT unique_rowid(),
@@ -33,7 +30,7 @@ const (
table_id INT8 NOT NULL,
dlq_timestamp TIMESTAMPTZ NOT NULL DEFAULT now():::TIMESTAMPTZ,
dlq_reason STRING NOT NULL,
- mutation_type %s.%s.mutation_type,
+ mutation_type STRING NOT NULL,
key_value_bytes BYTES NOT NULL NOT VISIBLE,
incoming_row JSONB,
-- PK should be unique based on the ID, job ID and timestamp at which the
@@ -111,11 +108,11 @@ func (dlq *noopDeadLetterQueueClient) Log(
}
tableID := cdcEventRow.TableID
- var mutationType replicationMutationType
+ var mutationType string
if cdcEventRow.IsDeleted() {
- mutationType = deleteMutation
+ mutationType = deleteMutation.String()
} else {
- mutationType = insertMutation
+ mutationType = insertMutation.String()
}
bytes, err := protoutil.Marshal(&kv)
@@ -129,7 +126,7 @@ func (dlq *noopDeadLetterQueueClient) Log(
mutation_type: %s,
key_value_bytes: %v,
incoming_row: %s`,
- ingestionJobID, tableID, reason.Error(), stoppedRetryReason.String(), mutationType.String(), bytes, cdcEventRow.DebugString())
+ ingestionJobID, tableID, reason.Error(), stoppedRetryReason.String(), mutationType, bytes, cdcEventRow.DebugString())
return nil
}
@@ -147,12 +144,7 @@ func (dlq *deadLetterQueueClient) Create(ctx context.Context) error {
return errors.Wrapf(err, "failed to create crdb_replication schema in database %s", dstTableMeta.getDatabaseName())
}
- createEnumStmt := fmt.Sprintf(createEnumBaseStmt, dstTableMeta.getDatabaseName(), dlqSchemaName)
- if _, err := dlq.ie.Exec(ctx, "create-dlq-enum", nil, createEnumStmt); err != nil {
- return errors.Wrapf(err, "failed to create mutation_type enum in database %s", dstTableMeta.getDatabaseName())
- }
-
- createTableStmt := fmt.Sprintf(createTableBaseStmt, dlqTableName, dstTableMeta.getDatabaseName(), dlqSchemaName)
+ createTableStmt := fmt.Sprintf(createTableBaseStmt, dlqTableName)
if _, err := dlq.ie.Exec(ctx, "create-dlq-table", nil, createTableStmt); err != nil {
return errors.Wrapf(err, "failed to create dlq for table %d", dstTableMeta.tableID)
}
@@ -186,11 +178,11 @@ func (dlq *deadLetterQueueClient) Log(
}
// TODO(azhu): include update type
- var mutationType replicationMutationType
+ var mutationType string
if cdcEventRow.IsDeleted() {
- mutationType = deleteMutation
+ mutationType = deleteMutation.String()
} else {
- mutationType = insertMutation
+ mutationType = insertMutation.String()
}
jsonRow, err := cdcEventRow.ToJSON()
@@ -204,7 +196,7 @@ func (dlq *deadLetterQueueClient) Log(
ingestionJobID,
dstTableMeta.tableID,
fmt.Sprintf("%s (%s)", reason, stoppedRetryingReason),
- mutationType.String(),
+ mutationType,
bytes,
); err != nil {
return errors.Wrapf(err, "failed to insert row for table %s without json", dlqTableName)
@@ -220,7 +212,7 @@ func (dlq *deadLetterQueueClient) Log(
ingestionJobID,
dstTableMeta.tableID,
fmt.Sprintf("%s (%s)", reason, stoppedRetryingReason),
- mutationType.String(),
+ mutationType,
bytes,
jsonRow,
); err != nil {
diff --git a/pkg/ccl/crosscluster/logical/dead_letter_queue_test.go b/pkg/ccl/crosscluster/logical/dead_letter_queue_test.go
index 5a76f94d4aa3..c50115dbaa36 100644
--- a/pkg/ccl/crosscluster/logical/dead_letter_queue_test.go
+++ b/pkg/ccl/crosscluster/logical/dead_letter_queue_test.go
@@ -240,14 +240,11 @@ func TestDLQCreation(t *testing.T) {
slices.Sort(actualDQLTables)
require.Equal(t, expectedDLQTables, actualDQLTables)
- // Verify enum creation
- enumRow := [][]string{
- {dlqSchemaName, "mutation_type", "{insert,update,delete}"},
- }
+ // Verify that no custom enums were created
sqlDB.CheckQueryResults(t,
- fmt.Sprintf(`SELECT schema, name, values FROM [SHOW ENUMS FROM %s.%s]`, defaultDbName, dlqSchemaName), enumRow)
+ fmt.Sprintf(`SHOW ENUMS FROM %s.%s`, defaultDbName, dlqSchemaName), [][]string{})
sqlDB.CheckQueryResults(t,
- fmt.Sprintf(`SELECT schema, name, values FROM [SHOW ENUMS FROM %s.%s]`, dbAName, dlqSchemaName), enumRow)
+ fmt.Sprintf(`SHOW ENUMS FROM %s.%s`, dbAName, dlqSchemaName), [][]string{})
}
func TestDLQLogging(t *testing.T) {
diff --git a/pkg/ccl/crosscluster/logical/logical_replication_job_test.go b/pkg/ccl/crosscluster/logical/logical_replication_job_test.go
index ee2e71811c02..0d217f31fb9e 100644
--- a/pkg/ccl/crosscluster/logical/logical_replication_job_test.go
+++ b/pkg/ccl/crosscluster/logical/logical_replication_job_test.go
@@ -306,6 +306,34 @@ func testLogicalStreamIngestionJobBasic(t *testing.T, mode string) {
}
dbA.CheckQueryResults(t, "SELECT * from a.tab", expectedRows)
dbB.CheckQueryResults(t, "SELECT * from b.tab", expectedRows)
+
+ // Verify that we didn't have the data looping problem. These
+ // expectations are for how many operations happened on the
+ // a-side.
+ //
+ // These assertions feel likely to flake since they assume
+ // that the test runner is fast enough to beat the replication
+ // stream when applying subsequent operations.
+ if !skip.Duress() {
+ var expPuts, expCPuts int64
+ if mode == "validated" {
+ expPuts, expCPuts = 3, 3
+ if tryOptimisticInsertEnabled.Get(&s.ClusterSettings().SV) {
+ // When performing 1 update, we don't have the prevValue set, so if
+ // we're using the optimistic insert strategy, it would result in an
+ // additional CPut (that ultimately fails). The cluster setting is
+ // randomized in tests, so we need to handle both cases.
+ expCPuts++
+ }
+ } else if mode == "immediate" {
+ expPuts, expCPuts = 1, 7
+ } else {
+ t.Fatalf("no put/cput expectations for unknown mode: %s", mode)
+ }
+
+ require.Equal(t, expPuts, numPuts.Load())
+ require.Equal(t, expCPuts, numCPuts.Load())
+ }
}
func TestLogicalStreamIngestionJobWithCursor(t *testing.T) {
diff --git a/pkg/ccl/crosscluster/logical/logical_replication_writer_processor.go b/pkg/ccl/crosscluster/logical/logical_replication_writer_processor.go
index 814a9e0fe910..6d109c0ef4d1 100644
--- a/pkg/ccl/crosscluster/logical/logical_replication_writer_processor.go
+++ b/pkg/ccl/crosscluster/logical/logical_replication_writer_processor.go
@@ -1042,7 +1042,7 @@ func (t *txnBatch) HandleBatch(
if err != nil {
return stats, err
}
- stats.Add(s)
+ stats.optimisticInsertConflicts += s.optimisticInsertConflicts
} else {
err = t.db.Txn(ctx, func(ctx context.Context, txn isql.Txn) error {
for _, kv := range batch {
@@ -1053,7 +1053,7 @@ func (t *txnBatch) HandleBatch(
if err != nil {
return err
}
- stats.Add(s)
+ stats.optimisticInsertConflicts += s.optimisticInsertConflicts
}
return nil
}, isql.WithSessionData(t.sd))
diff --git a/pkg/ccl/crosscluster/physical/alter_replication_job.go b/pkg/ccl/crosscluster/physical/alter_replication_job.go
index d376802ef84b..70211fb82420 100644
--- a/pkg/ccl/crosscluster/physical/alter_replication_job.go
+++ b/pkg/ccl/crosscluster/physical/alter_replication_job.go
@@ -42,7 +42,7 @@ const (
)
var alterReplicationCutoverHeader = colinfo.ResultColumns{
- {Name: "cutover_time", Typ: types.Decimal},
+ {Name: "failover_time", Typ: types.Decimal},
}
// ResolvedTenantReplicationOptions represents options from an
@@ -605,12 +605,12 @@ func applyCutoverTime(
return job.WithTxn(txn).Update(ctx, func(txn isql.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater) error {
progress := md.Progress.GetStreamIngest()
details := md.Payload.GetStreamIngestion()
- if progress.ReplicationStatus == jobspb.ReplicationCuttingOver {
+ if progress.ReplicationStatus == jobspb.ReplicationFailingOver {
return errors.Newf("job %d already started cutting over to timestamp %s",
job.ID(), progress.CutoverTime)
}
- progress.ReplicationStatus = jobspb.ReplicationPendingCutover
+ progress.ReplicationStatus = jobspb.ReplicationPendingFailover
// Update the sentinel being polled by the stream ingestion job to
// check if a complete has been signaled.
progress.CutoverTime = cutoverTimestamp
diff --git a/pkg/ccl/crosscluster/physical/alter_replication_job_test.go b/pkg/ccl/crosscluster/physical/alter_replication_job_test.go
index 21508901b4b3..5ae2855682d2 100644
--- a/pkg/ccl/crosscluster/physical/alter_replication_job_test.go
+++ b/pkg/ccl/crosscluster/physical/alter_replication_job_test.go
@@ -195,7 +195,7 @@ func TestAlterTenantUpdateExistingCutoverTime(t *testing.T) {
}
getCutoverTime := func() hlc.Timestamp {
var cutoverStr string
- c.DestSysSQL.QueryRow(c.T, fmt.Sprintf("SELECT cutover_time FROM [SHOW TENANT %s WITH REPLICATION STATUS]",
+ c.DestSysSQL.QueryRow(c.T, fmt.Sprintf("SELECT failover_time FROM [SHOW TENANT %s WITH REPLICATION STATUS]",
c.Args.DestTenantName)).Scan(&cutoverStr)
cutoverOutput := replicationtestutils.DecimalTimeToHLC(t, cutoverStr)
return cutoverOutput
@@ -216,7 +216,7 @@ func TestAlterTenantUpdateExistingCutoverTime(t *testing.T) {
args.DestTenantName, cutoverTime.AsOfSystemTime()).Scan(&cutoverStr)
cutoverOutput := replicationtestutils.DecimalTimeToHLC(t, cutoverStr)
require.Equal(t, cutoverTime, cutoverOutput)
- require.Equal(c.T, "replication pending cutover", getTenantStatus())
+ require.Equal(c.T, "replication pending failover", getTenantStatus())
require.Equal(t, cutoverOutput, getCutoverTime())
// And cutover to an even further time.
@@ -225,7 +225,7 @@ func TestAlterTenantUpdateExistingCutoverTime(t *testing.T) {
args.DestTenantName, cutoverTime.AsOfSystemTime()).Scan(&cutoverStr)
cutoverOutput = replicationtestutils.DecimalTimeToHLC(t, cutoverStr)
require.Equal(t, cutoverTime, cutoverOutput)
- require.Equal(c.T, "replication pending cutover", getTenantStatus())
+ require.Equal(c.T, "replication pending failover", getTenantStatus())
require.Equal(t, cutoverOutput, getCutoverTime())
}
@@ -396,7 +396,7 @@ func TestTenantStatusWithFutureCutoverTime(t *testing.T) {
c.DestSysSQL.Exec(c.T, `ALTER TENANT $1 COMPLETE REPLICATION TO SYSTEM TIME $2::string`,
args.DestTenantName, cutoverTime)
- require.Equal(c.T, "replication pending cutover", getTenantStatus())
+ require.Equal(c.T, "replication pending failover", getTenantStatus())
c.DestSysSQL.Exec(c.T, `ALTER TENANT $1 COMPLETE REPLICATION TO LATEST`, args.DestTenantName)
unblockResumerExit()
jobutils.WaitForJobToSucceed(c.T, c.DestSysSQL, jobspb.JobID(ingestionJobID))
@@ -465,10 +465,10 @@ func TestTenantStatusWithLatestCutoverTime(t *testing.T) {
testutils.SucceedsSoon(t, func() error {
s := getTenantStatus()
- if s == "replication pending cutover" {
- return errors.Errorf("tenant status is still 'replication pending cutover', waiting")
+ if s == "replication pending failover" {
+ return errors.Errorf("tenant status is still 'replication pending failover', waiting")
}
- require.Equal(c.T, "replication cutting over", s)
+ require.Equal(c.T, "replication failing over", s)
return nil
})
diff --git a/pkg/ccl/crosscluster/physical/metrics.go b/pkg/ccl/crosscluster/physical/metrics.go
index 8d4084e2dd06..78fa2a136bee 100644
--- a/pkg/ccl/crosscluster/physical/metrics.go
+++ b/pkg/ccl/crosscluster/physical/metrics.go
@@ -84,7 +84,7 @@ var (
// ranges left to be reverted, but some may not have writes and therefore the
// revert will be a no-op for those ranges.
metaReplicationCutoverProgress = metric.Metadata{
- Name: "physical_replication.cutover_progress",
+ Name: "physical_replication.failover_progress",
Help: "The number of ranges left to revert in order to complete an inflight cutover",
Measurement: "Ranges",
Unit: metric.Unit_COUNT,
diff --git a/pkg/ccl/crosscluster/physical/replication_random_client_test.go b/pkg/ccl/crosscluster/physical/replication_random_client_test.go
index 1731793cbecf..b4cd0ed51264 100644
--- a/pkg/ccl/crosscluster/physical/replication_random_client_test.go
+++ b/pkg/ccl/crosscluster/physical/replication_random_client_test.go
@@ -210,7 +210,7 @@ func TestStreamIngestionJobWithRandomClient(t *testing.T) {
receivedRevertRequest = make(chan struct{})
_, err = conn.Exec(`SET CLUSTER SETTING bulkio.stream_ingestion.minimum_flush_interval= '0.0005ms'`)
require.NoError(t, err)
- _, err = conn.Exec(`SET CLUSTER SETTING bulkio.stream_ingestion.cutover_signal_poll_interval='1s'`)
+ _, err = conn.Exec(`SET CLUSTER SETTING bulkio.stream_ingestion.failover_signal_poll_interval='1s'`)
require.NoError(t, err)
streamAddr := getTestRandomClientURI(roachpb.MustMakeTenantID(oldTenantID), oldTenantName)
query := fmt.Sprintf(`CREATE TENANT "30" FROM REPLICATION OF "10" ON '%s'`, streamAddr)
diff --git a/pkg/ccl/crosscluster/physical/replication_stream_e2e_test.go b/pkg/ccl/crosscluster/physical/replication_stream_e2e_test.go
index ac1f7a562819..0713d03556be 100644
--- a/pkg/ccl/crosscluster/physical/replication_stream_e2e_test.go
+++ b/pkg/ccl/crosscluster/physical/replication_stream_e2e_test.go
@@ -1163,7 +1163,7 @@ func TestTenantStreamingShowTenant(t *testing.T) {
c.DestSysSQL.QueryRow(c.T, `ALTER TENANT $1 COMPLETE REPLICATION TO SYSTEM TIME $2::string`,
c.Args.DestTenantName, futureTime.AsOfSystemTime()).Scan(&cutoverStr)
var showCutover string
- c.DestSysSQL.QueryRow(c.T, fmt.Sprintf("SELECT cutover_time FROM [SHOW TENANT %s WITH REPLICATION STATUS]",
+ c.DestSysSQL.QueryRow(c.T, fmt.Sprintf("SELECT failover_time FROM [SHOW TENANT %s WITH REPLICATION STATUS]",
c.Args.DestTenantName)).Scan(&showCutover)
require.Equal(c.T, cutoverStr, showCutover)
cutoverOutput := replicationtestutils.DecimalTimeToHLC(c.T, showCutover)
diff --git a/pkg/ccl/crosscluster/physical/standby_read_ts_poller_job_test.go b/pkg/ccl/crosscluster/physical/standby_read_ts_poller_job_test.go
index 9daf6a7cd525..2d48abe2b8be 100644
--- a/pkg/ccl/crosscluster/physical/standby_read_ts_poller_job_test.go
+++ b/pkg/ccl/crosscluster/physical/standby_read_ts_poller_job_test.go
@@ -162,7 +162,7 @@ func TestFastFailbackWithReaderTenant(t *testing.T) {
"SET CLUSTER SETTING physical_replication.consumer.heartbeat_frequency = '1s'",
"SET CLUSTER SETTING physical_replication.consumer.job_checkpoint_frequency = '100ms'",
"SET CLUSTER SETTING physical_replication.consumer.minimum_flush_interval = '10ms'",
- "SET CLUSTER SETTING physical_replication.consumer.cutover_signal_poll_interval = '100ms'",
+ "SET CLUSTER SETTING physical_replication.consumer.failover_signal_poll_interval = '100ms'",
"SET CLUSTER SETTING spanconfig.reconciliation_job.checkpoint_interval = '100ms'",
} {
sqlA.Exec(t, s)
diff --git a/pkg/ccl/crosscluster/physical/stream_ingestion_job.go b/pkg/ccl/crosscluster/physical/stream_ingestion_job.go
index 4744865fb1f2..828a1ae01773 100644
--- a/pkg/ccl/crosscluster/physical/stream_ingestion_job.go
+++ b/pkg/ccl/crosscluster/physical/stream_ingestion_job.go
@@ -117,7 +117,7 @@ func completeIngestion(
msg := redact.Sprintf("completing the producer job %d in the source cluster",
details.StreamID)
- updateRunningStatus(ctx, ingestionJob, jobspb.ReplicationCuttingOver, msg)
+ updateRunningStatus(ctx, ingestionJob, jobspb.ReplicationFailingOver, msg)
completeProducerJob(ctx, ingestionJob, execCtx.ExecCfg().InternalDB, true)
evalContext := &execCtx.ExtendedEvalContext().Context
if err := startPostCutoverRetentionJob(ctx, execCtx.ExecCfg(), details, evalContext, cutoverTimestamp); err != nil {
@@ -273,7 +273,7 @@ func ingestWithRetries(
if err != nil {
return err
}
- updateRunningStatus(ctx, ingestionJob, jobspb.ReplicationCuttingOver,
+ updateRunningStatus(ctx, ingestionJob, jobspb.ReplicationFailingOver,
"stream ingestion finished successfully")
return nil
}
@@ -466,10 +466,10 @@ func maybeRevertToCutoverTimestamp(
shouldRevertToCutover = cutoverTimeIsEligibleForCutover(ctx, cutoverTimestamp, md.Progress)
if shouldRevertToCutover {
- updateRunningStatusInternal(md, ju, jobspb.ReplicationCuttingOver,
+ updateRunningStatusInternal(md, ju, jobspb.ReplicationFailingOver,
fmt.Sprintf("starting to cut over to the given timestamp %s", cutoverTimestamp))
} else {
- if streamIngestionProgress.ReplicationStatus == jobspb.ReplicationCuttingOver {
+ if streamIngestionProgress.ReplicationStatus == jobspb.ReplicationFailingOver {
return errors.AssertionFailedf("cutover already started but cutover time %s is not eligible for cutover",
cutoverTimestamp)
}
diff --git a/pkg/ccl/crosscluster/physical/stream_ingestion_job_test.go b/pkg/ccl/crosscluster/physical/stream_ingestion_job_test.go
index baae750cb3ea..da48a990e6f5 100644
--- a/pkg/ccl/crosscluster/physical/stream_ingestion_job_test.go
+++ b/pkg/ccl/crosscluster/physical/stream_ingestion_job_test.go
@@ -135,7 +135,7 @@ func TestTenantStreamingFailback(t *testing.T) {
"SET CLUSTER SETTING physical_replication.consumer.heartbeat_frequency = '1s'",
"SET CLUSTER SETTING physical_replication.consumer.job_checkpoint_frequency = '100ms'",
"SET CLUSTER SETTING physical_replication.consumer.minimum_flush_interval = '10ms'",
- "SET CLUSTER SETTING physical_replication.consumer.cutover_signal_poll_interval = '100ms'",
+ "SET CLUSTER SETTING physical_replication.consumer.failover_signal_poll_interval = '100ms'",
"SET CLUSTER SETTING spanconfig.reconciliation_job.checkpoint_interval = '100ms'",
} {
sqlA.Exec(t, s)
diff --git a/pkg/ccl/crosscluster/physical/stream_ingestion_processor.go b/pkg/ccl/crosscluster/physical/stream_ingestion_processor.go
index a9840fc153d6..b9d0cbbdf025 100644
--- a/pkg/ccl/crosscluster/physical/stream_ingestion_processor.go
+++ b/pkg/ccl/crosscluster/physical/stream_ingestion_processor.go
@@ -84,11 +84,11 @@ var tooSmallRangeKeySize = settings.RegisterByteSizeSetting(
// signaled to cutover.
var cutoverSignalPollInterval = settings.RegisterDurationSetting(
settings.SystemOnly,
- "bulkio.stream_ingestion.cutover_signal_poll_interval",
+ "bulkio.stream_ingestion.failover_signal_poll_interval",
"the interval at which the stream ingestion job checks if it has been signaled to cutover",
10*time.Second,
settings.NonNegativeDuration,
- settings.WithName("physical_replication.consumer.cutover_signal_poll_interval"),
+ settings.WithName("physical_replication.consumer.failover_signal_poll_interval"),
)
var quantize = settings.RegisterDurationSettingWithExplicitUnit(
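In the settings API used above, the second argument is the setting's key while settings.WithName attaches the separate user-facing name, which is why the rename has to touch both strings. A minimal sketch of the same registration pattern (description text illustrative):

    var pollInterval = settings.RegisterDurationSetting(
        settings.SystemOnly,
        "bulkio.stream_ingestion.failover_signal_poll_interval", // key
        "how often the ingestion job polls for a failover signal",
        10*time.Second,
        settings.NonNegativeDuration,
        // Display name surfaced to users, distinct from the key above.
        settings.WithName("physical_replication.consumer.failover_signal_poll_interval"),
    )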
diff --git a/pkg/ccl/crosscluster/physical/testdata/simple b/pkg/ccl/crosscluster/physical/testdata/simple
index cf5f5f4b4f43..f8c4b6d1a2f0 100644
--- a/pkg/ccl/crosscluster/physical/testdata/simple
+++ b/pkg/ccl/crosscluster/physical/testdata/simple
@@ -67,7 +67,7 @@ SHOW TENANTS
2 destination replicating none
query-sql as=destination-system
-SELECT id, name, source_tenant_name, cutover_time, status FROM [SHOW TENANTS WITH REPLICATION STATUS]
+SELECT id, name, source_tenant_name, failover_time, status FROM [SHOW TENANTS WITH REPLICATION STATUS]
----
1 system ready
2 destination source replicating
diff --git a/pkg/ccl/crosscluster/producer/event_stream.go b/pkg/ccl/crosscluster/producer/event_stream.go
index 84c3aa783d4d..1d34cac6bc00 100644
--- a/pkg/ccl/crosscluster/producer/event_stream.go
+++ b/pkg/ccl/crosscluster/producer/event_stream.go
@@ -420,6 +420,53 @@ func (s *eventStream) sendFlush(ctx context.Context, event *streampb.StreamEvent
}
}
+type checkpointPacer struct {
+ pace time.Duration
+ next time.Time
+ skipped bool
+}
+
+func makeCheckpointPacer(frequency time.Duration) checkpointPacer {
+ return checkpointPacer{
+ pace: frequency,
+ next: timeutil.Now().Add(frequency),
+ skipped: false,
+ }
+}
+
+func (p *checkpointPacer) shouldCheckpoint(
+ currentFrontier hlc.Timestamp, frontierAdvanced bool,
+) bool {
+ now := timeutil.Now()
+ enoughTimeElapsed := p.next.Before(now)
+
+ // Handle previously skipped updates.
+ // Normally, we want to emit checkpoint records when the frontier advances.
+ // However, a checkpoint is skipped if the frontier advances more rapidly
+ // than MinCheckpointFrequency allows. In that case we suppress the
+ // checkpoint here and emit it at a later time.
+ if p.skipped {
+ if enoughTimeElapsed {
+ p.skipped = false
+ p.next = now.Add(p.pace)
+ return true
+ }
+ return false
+ }
+
+ isInitialScanCheckpoint := currentFrontier.IsEmpty()
+ // Handle updates when frontier advances.
+ if frontierAdvanced || isInitialScanCheckpoint {
+ if enoughTimeElapsed {
+ p.next = now.Add(p.pace)
+ return true
+ }
+ p.skipped = true
+ return false
+ }
+ return false
+}
+
// Add a RangeFeedSSTable into current batch.
func (s *eventStream) addSST(sst *kvpb.RangeFeedSSTable, registeredSpan roachpb.Span) error {
// We send over the whole SSTable if the sst span is within
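A short usage sketch of the pacer's skip-then-emit behavior (the 10s pace and the timestamp are illustrative, not taken from the code above):

    pacer := makeCheckpointPacer(10 * time.Second)
    ts := hlc.Timestamp{WallTime: timeutil.Now().UnixNano()}

    // The frontier advances right after creation: too soon to checkpoint,
    // so the update is skipped and remembered.
    emit := pacer.shouldCheckpoint(ts, true) // false; pacer.skipped is now true

    // Once the pace interval elapses, the previously skipped checkpoint is
    // emitted even if the frontier did not advance again.
    emit = pacer.shouldCheckpoint(ts, false) // true after >10s
    _ = emit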
diff --git a/pkg/ccl/crosscluster/producer/span_config_event_stream.go b/pkg/ccl/crosscluster/producer/span_config_event_stream.go
index fb3d6f50b47c..723c09807966 100644
--- a/pkg/ccl/crosscluster/producer/span_config_event_stream.go
+++ b/pkg/ccl/crosscluster/producer/span_config_event_stream.go
@@ -7,7 +7,6 @@ package producer
import (
"context"
- "time"
"github.com/cockroachdb/cockroach/pkg/jobs/jobspb"
"github.com/cockroachdb/cockroach/pkg/keys"
@@ -25,7 +24,6 @@ import (
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/mon"
"github.com/cockroachdb/cockroach/pkg/util/protoutil"
- "github.com/cockroachdb/cockroach/pkg/util/timeutil"
"github.com/cockroachdb/cockroach/pkg/util/tracing"
"github.com/cockroachdb/errors"
)
@@ -212,27 +210,6 @@ func (s *spanConfigEventStream) flushEvent(ctx context.Context, event *streampb.
}
}
-type checkpointPacer struct {
- pace time.Duration
- next time.Time
-}
-
-func makeCheckpointPacer(frequency time.Duration) checkpointPacer {
- return checkpointPacer{
- pace: frequency,
- next: timeutil.Now().Add(frequency),
- }
-}
-
-func (p *checkpointPacer) shouldCheckpoint() bool {
- now := timeutil.Now()
- if p.next.Before(now) {
- p.next = now.Add(p.pace)
- return true
- }
- return false
-}
-
// streamLoop is the main processing loop responsible for reading buffered rangefeed events,
// accumulating them in a batch, and sending those events to the ValueGenerator.
func (s *spanConfigEventStream) streamLoop(ctx context.Context) error {
@@ -294,7 +271,7 @@ func (s *spanConfigEventStream) streamLoop(ctx context.Context) error {
}
batcher.addSpanConfigs(bufferedEvents, update.Timestamp)
bufferedEvents = bufferedEvents[:0]
- if pacer.shouldCheckpoint() || fromFullScan {
+ if pacer.shouldCheckpoint(update.Timestamp, true) || fromFullScan {
log.VEventf(ctx, 2, "checkpointing span config stream at %s", update.Timestamp.GoTime())
if batcher.getSize() > 0 {
log.VEventf(ctx, 2, "sending %d span config events", len(batcher.batch.SpanConfigs))
diff --git a/pkg/ccl/crosscluster/replicationtestutils/testutils.go b/pkg/ccl/crosscluster/replicationtestutils/testutils.go
index 85d51ce4ee3f..eef03ae7d5ab 100644
--- a/pkg/ccl/crosscluster/replicationtestutils/testutils.go
+++ b/pkg/ccl/crosscluster/replicationtestutils/testutils.go
@@ -646,13 +646,13 @@ var defaultSrcClusterSetting = map[string]string{
}
var defaultDestClusterSetting = map[string]string{
- `stream_replication.consumer_heartbeat_frequency`: `'1s'`,
- `stream_replication.job_checkpoint_frequency`: `'100ms'`,
- `bulkio.stream_ingestion.minimum_flush_interval`: `'10ms'`,
- `bulkio.stream_ingestion.cutover_signal_poll_interval`: `'100ms'`,
- `jobs.registry.interval.adopt`: `'1s'`,
- `spanconfig.reconciliation_job.checkpoint_interval`: `'100ms'`,
- `kv.rangefeed.enabled`: `true`,
+ `stream_replication.consumer_heartbeat_frequency`: `'1s'`,
+ `stream_replication.job_checkpoint_frequency`: `'100ms'`,
+ `bulkio.stream_ingestion.minimum_flush_interval`: `'10ms'`,
+ `bulkio.stream_ingestion.failover_signal_poll_interval`: `'100ms'`,
+ `jobs.registry.interval.adopt`: `'1s'`,
+ `spanconfig.reconciliation_job.checkpoint_interval`: `'100ms'`,
+ `kv.rangefeed.enabled`: `true`,
}
func ConfigureClusterSettings(setting map[string]string) []string {
diff --git a/pkg/ccl/ldapccl/authentication_ldap.go b/pkg/ccl/ldapccl/authentication_ldap.go
index 8fd79a7f89ac..de1e747e4285 100644
--- a/pkg/ccl/ldapccl/authentication_ldap.go
+++ b/pkg/ccl/ldapccl/authentication_ldap.go
@@ -87,6 +87,12 @@ func (authManager *ldapAuthManager) FetchLDAPUserDN(
errors.Newf("LDAP authentication: unable to establish LDAP connection")
}
+ // Bind with the LDAP service user DN and password before performing the search for the LDAP user.
+ if err := authManager.mu.util.Bind(ctx, authManager.mu.conf.ldapBindDN, authManager.mu.conf.ldapBindPassword); err != nil {
+ return nil, redact.Sprintf("error binding ldap service account: %v", err),
+ errors.Newf("LDAP authentication: error binding as LDAP service user with configured credentials")
+ }
+
// Fetch the ldap server Distinguished Name using sql username as search value
// for ldap search attribute
userDN, err := authManager.mu.util.Search(ctx, authManager.mu.conf, user.Normalized())
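The flow introduced here — bind as the configured service account, then search for the user's DN — maps onto the underlying go-ldap client roughly as follows. A sketch with illustrative DNs and filter, not the ILDAPUtil implementation itself:

    conn, err := ldap.DialURL("ldaps://ldap.example.com:636")
    if err != nil {
        return err // unable to establish LDAP connection
    }
    defer conn.Close()
    // Service-account bind; a failure here is what now surfaces as "error
    // binding as LDAP service user with configured credentials".
    if err := conn.Bind("cn=readonly,dc=example,dc=com", "readonly_pwd"); err != nil {
        return err
    }
    // Search for the SQL user's distinguished name via the search attribute.
    res, err := conn.Search(ldap.NewSearchRequest(
        "dc=example,dc=com", ldap.ScopeWholeSubtree, ldap.NeverDerefAliases,
        0, 0, false, "(uid=foo)", []string{"dn"}, nil,
    ))
    if err != nil {
        return err
    }
    _ = res // res.Entries[0].DN would be the fetched user DN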
diff --git a/pkg/ccl/ldapccl/authentication_ldap_test.go b/pkg/ccl/ldapccl/authentication_ldap_test.go
index 5b516ac45f51..83d1a52cb5af 100644
--- a/pkg/ccl/ldapccl/authentication_ldap_test.go
+++ b/pkg/ccl/ldapccl/authentication_ldap_test.go
@@ -76,14 +76,14 @@ func TestLDAPFetchUser(t *testing.T) {
expectedDetailedErrMsg: `error when searching for user in LDAP server: LDAP search failed: invalid base DN ‹"invalid"› provided`},
{testName: "invalid bind dn",
hbaConfLDAPOpts: map[string]string{"ldapbinddn": invalidParam}, user: "foo", fetchUserSuccess: false,
- expectedErr: "LDAP authentication: unable to find LDAP user distinguished name",
- expectedErrDetails: "cannot find provided user foo on LDAP server",
- expectedDetailedErrMsg: "error when searching for user in LDAP server: LDAP search failed: LDAP bind failed: invalid username provided"},
+ expectedErr: "LDAP authentication: error binding as LDAP service user with configured credentials",
+ expectedErrDetails: "",
+ expectedDetailedErrMsg: "error binding ldap service account: LDAP bind failed: invalid username provided"},
{testName: "invalid bind pwd",
hbaConfLDAPOpts: map[string]string{"ldapbindpasswd": invalidParam}, user: "foo", fetchUserSuccess: false,
- expectedErr: "LDAP authentication: unable to find LDAP user distinguished name",
- expectedErrDetails: "cannot find provided user foo on LDAP server",
- expectedDetailedErrMsg: "error when searching for user in LDAP server: LDAP search failed: LDAP bind failed: invalid password provided"},
+ expectedErr: "LDAP authentication: error binding as LDAP service user with configured credentials",
+ expectedErrDetails: "",
+ expectedDetailedErrMsg: "error binding ldap service account: LDAP bind failed: invalid password provided"},
{testName: "invalid search attribute",
hbaConfLDAPOpts: map[string]string{"ldapsearchattribute": invalidParam}, user: "foo", fetchUserSuccess: false,
expectedErr: "LDAP authentication: unable to find LDAP user distinguished name",
@@ -192,3 +192,48 @@ func TestLDAPAuthentication(t *testing.T) {
})
}
}
+
+func TestLDAPConnectionReset(t *testing.T) {
+ defer leaktest.AfterTest(t)()
+ defer log.Scope(t).Close(t)
+ // Intercept the call to NewLDAPUtil and return the mocked NewLDAPUtil function.
+ mockLDAP, newMockLDAPUtil := LDAPMocks()
+ defer testutils.TestingHook(
+ &NewLDAPUtil,
+ newMockLDAPUtil)()
+ ctx := context.Background()
+ s := serverutils.StartServerOnly(t, base.TestServerArgs{})
+ defer s.Stopper().Stop(ctx)
+
+ manager := ConfigureLDAPAuth(ctx, s.AmbientCtx(), s.ClusterSettings(), s.StorageClusterID())
+ hbaEntryBase := "host all all all ldap "
+ hbaConfLDAPDefaultOpts := map[string]string{
+ "ldapserver": "localhost",
+ "ldapport": "636",
+ "ldapbasedn": "dc=localhost",
+ "ldapbinddn": "cn=readonly,dc=localhost",
+ "ldapbindpasswd": "readonly_pwd",
+ "ldapsearchattribute": "uid",
+ "ldapsearchfilter": "(memberOf=cn=users,ou=groups,dc=localhost)",
+ "ldapgrouplistfilter": "(cn=ldap_parent_1)",
+ }
+ hbaEntry := constructHBAEntry(t, hbaEntryBase, hbaConfLDAPDefaultOpts, nil)
+
+ if _, _, err := manager.FetchLDAPUserDN(
+ ctx, s.ClusterSettings(), username.MakeSQLUsernameFromPreNormalizedString("foo"), &hbaEntry, nil); err != nil {
+ t.Fatalf("expected success, got err=%v", err)
+ }
+ ldapConnection1 := mockLDAP.getLDAPsConn()
+
+ mockLDAP.resetLDAPsConn()
+
+ if _, _, err := manager.FetchLDAPUserDN(
+ ctx, s.ClusterSettings(), username.MakeSQLUsernameFromPreNormalizedString("foo"), &hbaEntry, nil); err != nil {
+ t.Fatalf("expected success, got err=%v", err)
+ }
+ ldapConnection2 := mockLDAP.getLDAPsConn()
+
+ require.Falsef(t, ldapConnection1 == ldapConnection2,
+ "expected a different ldap connection as previous connection was reset by server, conn1: %v, conn2: %v",
+ ldapConnection1, ldapConnection2)
+}
diff --git a/pkg/ccl/ldapccl/authorization_ldap.go b/pkg/ccl/ldapccl/authorization_ldap.go
index f3de821d6ae3..72a92968cfc9 100644
--- a/pkg/ccl/ldapccl/authorization_ldap.go
+++ b/pkg/ccl/ldapccl/authorization_ldap.go
@@ -84,6 +84,12 @@ func (authManager *ldapAuthManager) FetchLDAPGroups(
errors.Newf("LDAP authorization: unable to establish LDAP connection")
}
+ // Bind with the LDAP service user DN and password before listing groups for the LDAP user.
+ if err := authManager.mu.util.Bind(ctx, authManager.mu.conf.ldapBindDN, authManager.mu.conf.ldapBindPassword); err != nil {
+ return nil, redact.Sprintf("error binding ldap service account: %v", err),
+ errors.Newf("LDAP authorization: error binding as LDAP service user with configured credentials")
+ }
+
// Fetch the ldap server Distinguished Name using sql username as search value
// for ldap search attribute
fetchedGroups, err := authManager.mu.util.ListGroups(ctx, authManager.mu.conf, userDN.String())
diff --git a/pkg/ccl/ldapccl/authorization_ldap_test.go b/pkg/ccl/ldapccl/authorization_ldap_test.go
index b19d67feb4b8..8bf97e92171e 100644
--- a/pkg/ccl/ldapccl/authorization_ldap_test.go
+++ b/pkg/ccl/ldapccl/authorization_ldap_test.go
@@ -77,14 +77,14 @@ func TestLDAPAuthorization(t *testing.T) {
expectedDetailedErrMsg: `error when fetching groups for user dn ‹"cn=foo"› in LDAP server: LDAP groups list failed: invalid base DN ‹"invalid"› provided`},
{testName: "invalid bind dn",
hbaConfLDAPOpts: map[string]string{"ldapbinddn": invalidParam}, user: "cn=foo", authZSuccess: false,
- expectedErr: "LDAP authorization: unable to fetch groups for user",
- expectedErrDetails: "cannot find groups for which user is a member",
- expectedDetailedErrMsg: `error when fetching groups for user dn ‹"cn=foo"› in LDAP server: LDAP groups list failed: LDAP bind failed: invalid username provided`},
+ expectedErr: "LDAP authorization: error binding as LDAP service user with configured credentials",
+ expectedErrDetails: "",
+ expectedDetailedErrMsg: `error binding ldap service account: LDAP bind failed: invalid username provided`},
{testName: "invalid bind pwd",
hbaConfLDAPOpts: map[string]string{"ldapbindpasswd": invalidParam}, user: "cn=foo", authZSuccess: false,
- expectedErr: "LDAP authorization: unable to fetch groups for user",
- expectedErrDetails: "cannot find groups for which user is a member",
- expectedDetailedErrMsg: `error when fetching groups for user dn ‹"cn=foo"› in LDAP server: LDAP groups list failed: LDAP bind failed: invalid password provided`},
+ expectedErr: "LDAP authorization: error binding as LDAP service user with configured credentials",
+ expectedErrDetails: "",
+ expectedDetailedErrMsg: `error binding ldap service account: LDAP bind failed: invalid password provided`},
{testName: "invalid group list filter",
hbaConfLDAPOpts: map[string]string{"ldapgrouplistfilter": invalidParam}, user: "cn=foo", authZSuccess: false,
expectedErr: "LDAP authorization: unable to fetch groups for user",
diff --git a/pkg/ccl/ldapccl/ldap_test_util.go b/pkg/ccl/ldapccl/ldap_test_util.go
index 29ed1cb978b4..773e55462626 100644
--- a/pkg/ccl/ldapccl/ldap_test_util.go
+++ b/pkg/ccl/ldapccl/ldap_test_util.go
@@ -26,16 +26,17 @@ type mockLDAPUtil struct {
conn *ldap.Conn
tlsConfig *tls.Config
userGroupDNs map[string][]string
+ connClosing bool
}
var _ ILDAPUtil = &mockLDAPUtil{}
-var LDAPMocks = func() (mockLDAPUtil, func(context.Context, ldapConfig) (ILDAPUtil, error)) {
+var LDAPMocks = func() (*mockLDAPUtil, func(context.Context, ldapConfig) (ILDAPUtil, error)) {
var mLU = mockLDAPUtil{tlsConfig: &tls.Config{}, userGroupDNs: make(map[string][]string)}
var newMockLDAPUtil = func(ctx context.Context, conf ldapConfig) (ILDAPUtil, error) {
return &mLU, nil
}
- return mLU, newMockLDAPUtil
+ return &mLU, newMockLDAPUtil
}
// MaybeInitLDAPsConn implements the ILDAPUtil interface.
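Returning *mockLDAPUtil rather than a value copy is what makes the new connection-reset test possible: the factory closure and the test now share a single instance, so state mutations made through one are visible through the other. A minimal illustration, assuming the signatures above:

    m, factory := LDAPMocks()
    util, _ := factory(ctx, ldapConfig{}) // same *mockLDAPUtil instance as m
    m.resetLDAPsConn()                    // observed by util on its next MaybeInitLDAPsConn
    _ = util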
@@ -45,10 +46,25 @@ func (lu *mockLDAPUtil) MaybeInitLDAPsConn(ctx context.Context, conf ldapConfig)
} else if strings.Contains(conf.ldapPort, invalidParam) {
return errors.Newf(ldapsFailureMessage + ": invalid ldap port provided")
}
- lu.conn = &ldap.Conn{}
+ if lu.conn != nil && !lu.connClosing {
+ return nil
+ }
+ lu.conn = ldap.NewConn(nil, true)
return nil
}
+// resetLDAPsConn mocks the server behavior of sending ECONNRESET after
+// prolonged connection idleness.
+// ref: https://github.com/cockroachdb/cockroach/issues/133777
+func (lu *mockLDAPUtil) resetLDAPsConn() {
+ lu.connClosing = true
+}
+
+// getLDAPsConn returns the current ldap conn set for the ldap util.
+func (lu *mockLDAPUtil) getLDAPsConn() *ldap.Conn {
+ return lu.conn
+}
+
// Bind implements the ILDAPUtil interface.
func (lu *mockLDAPUtil) Bind(ctx context.Context, userDN string, ldapPwd string) error {
if strings.Contains(userDN, invalidParam) {
@@ -64,9 +80,6 @@ func (lu *mockLDAPUtil) Bind(ctx context.Context, userDN string, ldapPwd string)
func (lu *mockLDAPUtil) Search(
ctx context.Context, conf ldapConfig, username string,
) (userDN string, err error) {
- if err := lu.Bind(ctx, conf.ldapBindDN, conf.ldapBindPassword); err != nil {
- return "", errors.Wrap(err, searchFailureMessage)
- }
if strings.Contains(conf.ldapBaseDN, invalidParam) {
return "", errors.Newf(searchFailureMessage+": invalid base DN %q provided", conf.ldapBaseDN)
}
@@ -105,9 +118,6 @@ func (lu *mockLDAPUtil) SetGroups(userDN string, groupsDN []string) {
func (lu *mockLDAPUtil) ListGroups(
ctx context.Context, conf ldapConfig, userDN string,
) (ldapGroupsDN []string, err error) {
- if err := lu.Bind(ctx, conf.ldapBindDN, conf.ldapBindPassword); err != nil {
- return nil, errors.Wrap(err, groupListFailureMessage)
- }
if strings.Contains(conf.ldapBaseDN, invalidParam) {
return nil, errors.Newf(groupListFailureMessage+": invalid base DN %q provided", conf.ldapBaseDN)
}
diff --git a/pkg/ccl/ldapccl/ldap_util.go b/pkg/ccl/ldapccl/ldap_util.go
index 05b44100365c..956a1d60f179 100644
--- a/pkg/ccl/ldapccl/ldap_util.go
+++ b/pkg/ccl/ldapccl/ldap_util.go
@@ -45,8 +45,10 @@ func (lu *ldapUtil) MaybeInitLDAPsConn(ctx context.Context, conf ldapConfig) (er
// connections crdb nodes can take up(either in total or on a per node basis)
//
// ldapAddress := "ldap://ldap.example.com:636"
- //
- if lu.conn != nil {
+ // If the connection has been idle for some time, the server sends an
+ // ECONNRESET and the ldap client marks the connection as closing. We then
+ // need to dial a new connection to continue using the client.
+ if lu.conn != nil && !lu.conn.IsClosing() {
return nil
}
ldapAddress := conf.ldapServer + ":" + conf.ldapPort
@@ -68,9 +70,6 @@ func (lu *ldapUtil) Bind(ctx context.Context, userDN string, ldapPwd string) (er
func (lu *ldapUtil) Search(
ctx context.Context, conf ldapConfig, username string,
) (userDN string, err error) {
- if err := lu.Bind(ctx, conf.ldapBindDN, conf.ldapBindPassword); err != nil {
- return "", errors.Wrap(err, searchFailureMessage)
- }
// TODO(souravcrl): Currently search could be performed at subtree level but
// this should be configurable through HBA conf using any of the scopes
// provided: https://github.com/go-ldap/ldap/blob/master/search.go#L17-L24
@@ -100,9 +99,6 @@ func (lu *ldapUtil) Search(
func (lu *ldapUtil) ListGroups(
ctx context.Context, conf ldapConfig, userDN string,
) (_ []string, err error) {
- if err := lu.Bind(ctx, conf.ldapBindDN, conf.ldapBindPassword); err != nil {
- return nil, errors.Wrap(err, groupListFailureMessage)
- }
// TODO(souravcrl): Currently list groups can only be performed at subtree
// level but this should be configurable through HBA conf using any of the
// scopes provided:
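The IsClosing check above turns MaybeInitLDAPsConn into a reuse-or-redial guard. Sketched in isolation (ensureConn is a hypothetical name; the dial options assume the go-ldap v3 API):

    func (lu *ldapUtil) ensureConn(addr string, tlsCfg *tls.Config) error {
        // Reuse a healthy cached connection; redial only if the client marked
        // it closing after a server-side ECONNRESET.
        if lu.conn != nil && !lu.conn.IsClosing() {
            return nil
        }
        conn, err := ldap.DialURL("ldaps://"+addr, ldap.DialWithTLSConfig(tlsCfg))
        if err != nil {
            return err
        }
        lu.conn = conn
        return nil
    }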
diff --git a/pkg/ccl/logictestccl/testdata/logic_test/partitioning_implicit_read_committed b/pkg/ccl/logictestccl/testdata/logic_test/partitioning_implicit_read_committed
index d96f2b4c5d13..4b6f7da30ce8 100644
--- a/pkg/ccl/logictestccl/testdata/logic_test/partitioning_implicit_read_committed
+++ b/pkg/ccl/logictestccl/testdata/logic_test/partitioning_implicit_read_committed
@@ -160,15 +160,7 @@ SET tracing = kv
# Test a blind write.
statement ok
-UPSERT INTO overwrite VALUES (1, 'two', 3)
-
-# Test a blind rewrite. No tombstones because the PK doesn't change.
-statement ok
-UPSERT INTO overwrite VALUES (1, 'two', 4)
-
-# Test a blind overwrite.
-statement ok
-UPSERT INTO overwrite VALUES (1, 'three', 5)
+UPSERT INTO overwrite VALUES (1, 'two', 3);
query T
SELECT message FROM [SHOW TRACE FOR SESSION] WHERE message LIKE 'CPut%'
@@ -178,16 +170,6 @@ CPut /Table/111/1/" "/1/0 -> nil (tombstone)
CPut /Table/111/1/"\x80"/1/0 -> nil (tombstone)
CPut /Table/111/1/"\xa0"/1/0 -> nil (tombstone)
CPut /Table/111/1/"\xc0"/1/0 -> nil (tombstone)
-CPut /Table/111/1/"\x80"/1/0 -> /TUPLE/3:3:Int/5
-CPut /Table/111/1/" "/1/0 -> nil (tombstone)
-CPut /Table/111/1/"@"/1/0 -> nil (tombstone)
-CPut /Table/111/1/"\xa0"/1/0 -> nil (tombstone)
-CPut /Table/111/1/"\xc0"/1/0 -> nil (tombstone)
-
-query ITI
-SELECT * FROM overwrite ORDER BY pk
-----
-1 three 5
statement ok
INSERT INTO t VALUES (1, 'two', 3, 4, 5)
@@ -224,11 +206,6 @@ CPut /Table/111/1/" "/1/0 -> nil (tombstone)
CPut /Table/111/1/"\x80"/1/0 -> nil (tombstone)
CPut /Table/111/1/"\xa0"/1/0 -> nil (tombstone)
CPut /Table/111/1/"\xc0"/1/0 -> nil (tombstone)
-CPut /Table/111/1/"\x80"/1/0 -> /TUPLE/3:3:Int/5
-CPut /Table/111/1/" "/1/0 -> nil (tombstone)
-CPut /Table/111/1/"@"/1/0 -> nil (tombstone)
-CPut /Table/111/1/"\xa0"/1/0 -> nil (tombstone)
-CPut /Table/111/1/"\xc0"/1/0 -> nil (tombstone)
CPut /Table/110/1/"@"/1/0 -> /TUPLE/3:3:Int/3/1:4:Int/4/1:5:Int/5
CPut /Table/110/1/" "/1/0 -> nil (tombstone)
CPut /Table/110/1/"\x80"/1/0 -> nil (tombstone)
diff --git a/pkg/ccl/multiregionccl/region_test.go b/pkg/ccl/multiregionccl/region_test.go
index 51c0e84cbe84..012fe8891605 100644
--- a/pkg/ccl/multiregionccl/region_test.go
+++ b/pkg/ccl/multiregionccl/region_test.go
@@ -932,8 +932,8 @@ func testRegionAddDropWithConcurrentBackupOps(
}{
{
name: "backup-database",
- backupOp: `BACKUP DATABASE db INTO 'nodelocal://1/db_backup'`,
- restoreOp: `RESTORE DATABASE db FROM LATEST IN 'nodelocal://1/db_backup'`,
+ backupOp: `BACKUP DATABASE db TO 'nodelocal://1/db_backup'`,
+ restoreOp: `RESTORE DATABASE db FROM 'nodelocal://1/db_backup'`,
},
}
diff --git a/pkg/ccl/schemachangerccl/backup_base_generated_test.go b/pkg/ccl/schemachangerccl/backup_base_generated_test.go
index 0627cfa8bd93..2ad0efe202c9 100644
--- a/pkg/ccl/schemachangerccl/backup_base_generated_test.go
+++ b/pkg/ccl/schemachangerccl/backup_base_generated_test.go
@@ -169,13 +169,6 @@ func TestBackupRollbacks_base_alter_table_alter_column_set_not_null(t *testing.T
sctest.BackupRollbacks(t, path, sctest.SingleNodeTestClusterFactory{})
}
-func TestBackupRollbacks_base_alter_table_alter_column_type_general(t *testing.T) {
- defer leaktest.AfterTest(t)()
- defer log.Scope(t).Close(t)
- const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_column_type_general"
- sctest.BackupRollbacks(t, path, sctest.SingleNodeTestClusterFactory{})
-}
-
func TestBackupRollbacks_base_alter_table_alter_column_type_noop(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
@@ -694,13 +687,6 @@ func TestBackupRollbacksMixedVersion_base_alter_table_alter_column_set_not_null(
sctest.BackupRollbacksMixedVersion(t, path, sctest.SingleNodeTestClusterFactory{})
}
-func TestBackupRollbacksMixedVersion_base_alter_table_alter_column_type_general(t *testing.T) {
- defer leaktest.AfterTest(t)()
- defer log.Scope(t).Close(t)
- const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_column_type_general"
- sctest.BackupRollbacksMixedVersion(t, path, sctest.SingleNodeTestClusterFactory{})
-}
-
func TestBackupRollbacksMixedVersion_base_alter_table_alter_column_type_noop(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
@@ -1219,13 +1205,6 @@ func TestBackupSuccess_base_alter_table_alter_column_set_not_null(t *testing.T)
sctest.BackupSuccess(t, path, sctest.SingleNodeTestClusterFactory{})
}
-func TestBackupSuccess_base_alter_table_alter_column_type_general(t *testing.T) {
- defer leaktest.AfterTest(t)()
- defer log.Scope(t).Close(t)
- const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_column_type_general"
- sctest.BackupSuccess(t, path, sctest.SingleNodeTestClusterFactory{})
-}
-
func TestBackupSuccess_base_alter_table_alter_column_type_noop(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
@@ -1744,13 +1723,6 @@ func TestBackupSuccessMixedVersion_base_alter_table_alter_column_set_not_null(t
sctest.BackupSuccessMixedVersion(t, path, sctest.SingleNodeTestClusterFactory{})
}
-func TestBackupSuccessMixedVersion_base_alter_table_alter_column_type_general(t *testing.T) {
- defer leaktest.AfterTest(t)()
- defer log.Scope(t).Close(t)
- const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_column_type_general"
- sctest.BackupSuccessMixedVersion(t, path, sctest.SingleNodeTestClusterFactory{})
-}
-
func TestBackupSuccessMixedVersion_base_alter_table_alter_column_type_noop(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
diff --git a/pkg/ccl/schemachangerccl/testdata/decomp/multiregion b/pkg/ccl/schemachangerccl/testdata/decomp/multiregion
index 38541b7044cf..fab7e8cd135a 100644
--- a/pkg/ccl/schemachangerccl/testdata/decomp/multiregion
+++ b/pkg/ccl/schemachangerccl/testdata/decomp/multiregion
@@ -190,37 +190,31 @@ ElementState:
tableId: 110
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 1
name: b
tableId: 110
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 2
name: rowid
tableId: 110
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 4.294967292e+09
name: crdb_internal_origin_timestamp
tableId: 110
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 4.294967293e+09
name: crdb_internal_origin_id
tableId: 110
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 4.294967294e+09
name: tableoid
tableId: 110
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 4.294967295e+09
name: crdb_internal_mvcc_timestamp
tableId: 110
@@ -585,37 +579,31 @@ ElementState:
tableId: 109
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 1
name: a
tableId: 109
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 2
name: rowid
tableId: 109
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 4.294967292e+09
name: crdb_internal_origin_timestamp
tableId: 109
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 4.294967293e+09
name: crdb_internal_origin_id
tableId: 109
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 4.294967294e+09
name: tableoid
tableId: 109
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 4.294967295e+09
name: crdb_internal_mvcc_timestamp
tableId: 109
@@ -1009,43 +997,36 @@ ElementState:
tableId: 108
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 1
name: k
tableId: 108
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 2
name: v
tableId: 108
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 3
name: crdb_region
tableId: 108
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 4.294967292e+09
name: crdb_internal_origin_timestamp
tableId: 108
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 4.294967293e+09
name: crdb_internal_origin_id
tableId: 108
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 4.294967294e+09
name: tableoid
tableId: 108
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 4.294967295e+09
name: crdb_internal_mvcc_timestamp
tableId: 108
diff --git a/pkg/ccl/schemachangerccl/testdata/decomp/partitioning b/pkg/ccl/schemachangerccl/testdata/decomp/partitioning
index 96a189e579c1..7638ef2d8b97 100644
--- a/pkg/ccl/schemachangerccl/testdata/decomp/partitioning
+++ b/pkg/ccl/schemachangerccl/testdata/decomp/partitioning
@@ -111,43 +111,36 @@ ElementState:
tableId: 104
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 1
name: pk
tableId: 104
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 2
name: a
tableId: 104
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 3
name: j
tableId: 104
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 4.294967292e+09
name: crdb_internal_origin_timestamp
tableId: 104
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 4.294967293e+09
name: crdb_internal_origin_id
tableId: 104
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 4.294967294e+09
name: tableoid
tableId: 104
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 4.294967295e+09
name: crdb_internal_mvcc_timestamp
tableId: 104
@@ -624,37 +617,31 @@ ElementState:
tableId: 105
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 1
name: a
tableId: 105
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 2
name: b
tableId: 105
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 4.294967292e+09
name: crdb_internal_origin_timestamp
tableId: 105
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 4.294967293e+09
name: crdb_internal_origin_id
tableId: 105
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 4.294967294e+09
name: tableoid
tableId: 105
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 4.294967295e+09
name: crdb_internal_mvcc_timestamp
tableId: 105
diff --git a/pkg/ccl/serverccl/diagnosticsccl/reporter_test.go b/pkg/ccl/serverccl/diagnosticsccl/reporter_test.go
index d61dddebde58..9fc1bfc8ec38 100644
--- a/pkg/ccl/serverccl/diagnosticsccl/reporter_test.go
+++ b/pkg/ccl/serverccl/diagnosticsccl/reporter_test.go
@@ -125,6 +125,19 @@ func TestServerReport(t *testing.T) {
})
}
+ // We want to ensure that non-reportable settings, sensitive
+ // settings, and all string settings are redacted. Below we override
+ // one of each.
+ settingOverrides := []string{
+ `SET CLUSTER SETTING server.oidc_authentication.client_id = 'sensitive-client-id'`, // Sensitive setting.
+ `SET CLUSTER SETTING sql.log.user_audit = 'test_role NONE'`, // Non-reportable setting.
+ `SET CLUSTER SETTING changefeed.node_throttle_config = '{"message_rate": 0.5}'`, // String setting.
+ }
+ for _, s := range settingOverrides {
+ _, err := rt.serverDB.Exec(s)
+ require.NoError(t, err)
+ }
+
expectedUsageReports := 0
clusterSecret := sql.ClusterSecret.Get(&rt.settings.SV)
@@ -196,7 +209,7 @@ func TestServerReport(t *testing.T) {
// 3 + 3 = 6: set 3 initially and org is set mid-test for 3 altered settings,
// plus version, reporting and secret settings are set in startup
// migrations.
- expected, actual := 7, len(last.AlteredSettings)
+ expected, actual := 7+len(settingOverrides), len(last.AlteredSettings)
require.Equal(t, expected, actual, "expected %d changed settings, got %d: %v", expected, actual, last.AlteredSettings)
for key, expected := range map[string]string{
@@ -207,6 +220,9 @@ func TestServerReport(t *testing.T) {
"server.time_until_store_dead": "1m30s",
"version": clusterversion.Latest.String(),
"cluster.secret": "",
+ "server.oidc_authentication.client_id": "",
+ "sql.log.user_audit": "",
+ "changefeed.node_throttle_config": "",
} {
got, ok := last.AlteredSettings[key]
require.True(t, ok, "expected report of altered setting %q", key)
diff --git a/pkg/ccl/storageccl/engineccl/bench_test.go b/pkg/ccl/storageccl/engineccl/bench_test.go
index 019a3ac7047f..582fc11ae7d8 100644
--- a/pkg/ccl/storageccl/engineccl/bench_test.go
+++ b/pkg/ccl/storageccl/engineccl/bench_test.go
@@ -94,6 +94,9 @@ func loadTestData(
minWallTime = minSStableTimestamps[i/scaled]
}
timestamp := hlc.Timestamp{WallTime: minWallTime + rand.Int63n(int64(batchTimeSpan))}
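+ // Clamp to the smallest valid timestamp: when minWallTime is zero, the
+ // random offset can produce an empty (zero) timestamp, which sorts below
+ // hlc.MinTimestamp.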
+ if timestamp.Less(hlc.MinTimestamp) {
+ timestamp = hlc.MinTimestamp
+ }
value := roachpb.MakeValueFromBytes(randutil.RandBytes(rng, valueBytes))
value.InitChecksum(key)
if _, err := storage.MVCCPut(ctx, batch, key, timestamp, value, storage.MVCCWriteOptions{}); err != nil {
diff --git a/pkg/ccl/testccl/authccl/testdata/ldap b/pkg/ccl/testccl/authccl/testdata/ldap
index 3649c432ea9a..006ddb6b0c01 100644
--- a/pkg/ccl/testccl/authccl/testdata/ldap
+++ b/pkg/ccl/testccl/authccl/testdata/ldap
@@ -88,6 +88,48 @@ ERROR: LDAP option "ldapsearchfilter" is set to invalid value: "(*)": "ldapsearc
subtest end
+subtest incorrect_ldap_service_account
+
+set_hba
+host all ldap_user 127.0.0.1/32 ldap ldapserver=localhost ldapport=636 ldapbasedn="O=security org,DC=localhost" ldapbinddn="cn=invalid" ldapbindpasswd=ldap_pwd ldapsearchattribute=sAMAccountName ldapsearchfilter="(memberOf=*)"
+----
+# Active authentication configuration on this node:
+# Original configuration:
+# loopback all all all trust # built-in CockroachDB default
+# host all root all cert-password # CockroachDB mandatory rule
+# host all ldap_user 127.0.0.1/32 ldap ldapserver=localhost ldapport=636 ldapbasedn="O=security org,DC=localhost" ldapbinddn="cn=invalid" ldapbindpasswd=ldap_pwd ldapsearchattribute=sAMAccountName ldapsearchfilter="(memberOf=*)"
+#
+# Interpreted configuration:
+# TYPE DATABASE USER ADDRESS METHOD OPTIONS
+loopback all all all trust
+host all root all cert-password
+host all ldap_user 127.0.0.1/32 ldap ldapserver=localhost ldapport=636 "ldapbasedn=O=security org,DC=localhost" "ldapbinddn=cn=invalid" ldapbindpasswd=ldap_pwd ldapsearchattribute=sAMAccountName "ldapsearchfilter=(memberOf=*)"
+
+connect user=ldap_user password="valid"
+----
+ERROR: LDAP authentication: error binding as LDAP service user with configured credentials (SQLSTATE 28000)
+
+set_hba
+host all ldap_user 127.0.0.1/32 ldap ldapserver=localhost ldapport=636 ldapbasedn="O=security org,DC=localhost" ldapbinddn="CN=service_account,O=security org,DC=localhost" ldapbindpasswd="invalid" ldapsearchattribute=sAMAccountName ldapsearchfilter="(memberOf=*)"
+----
+# Active authentication configuration on this node:
+# Original configuration:
+# loopback all all all trust # built-in CockroachDB default
+# host all root all cert-password # CockroachDB mandatory rule
+# host all ldap_user 127.0.0.1/32 ldap ldapserver=localhost ldapport=636 ldapbasedn="O=security org,DC=localhost" ldapbinddn="CN=service_account,O=security org,DC=localhost" ldapbindpasswd="invalid" ldapsearchattribute=sAMAccountName ldapsearchfilter="(memberOf=*)"
+#
+# Interpreted configuration:
+# TYPE DATABASE USER ADDRESS METHOD OPTIONS
+loopback all all all trust
+host all root all cert-password
+host all ldap_user 127.0.0.1/32 ldap ldapserver=localhost ldapport=636 "ldapbasedn=O=security org,DC=localhost" "ldapbinddn=CN=service_account,O=security org,DC=localhost" "ldapbindpasswd=invalid" ldapsearchattribute=sAMAccountName "ldapsearchfilter=(memberOf=*)"
+
+connect user=ldap_user password="valid"
+----
+ERROR: LDAP authentication: error binding as LDAP service user with configured credentials (SQLSTATE 28000)
+
+subtest end
+
subtest invalid_ldap_password
set_hba
diff --git a/pkg/ccl/workloadccl/allccl/all_test.go b/pkg/ccl/workloadccl/allccl/all_test.go
index 3d7072bd7b5d..1b97c183ac7f 100644
--- a/pkg/ccl/workloadccl/allccl/all_test.go
+++ b/pkg/ccl/workloadccl/allccl/all_test.go
@@ -113,6 +113,7 @@ func TestAllRegisteredImportFixture(t *testing.T) {
func TestAllRegisteredSetup(t *testing.T) {
defer leaktest.AfterTest(t)()
+ skip.UnderDeadlock(t)
for _, meta := range workload.Registered() {
if bigInitialData(meta) {
diff --git a/pkg/cli/demo.go b/pkg/cli/demo.go
index 0bd177c9052f..335d559eedeb 100644
--- a/pkg/cli/demo.go
+++ b/pkg/cli/demo.go
@@ -38,10 +38,6 @@ subcommands: e.g. "cockroach demo startrek". See --help for a full list.
By default, the 'movr' dataset is pre-loaded. You can also use --no-example-database
to avoid pre-loading a dataset.
-
-cockroach demo attempts to connect to a Cockroach Labs server to send
-telemetry back to Cockroach Labs. In order to disable this behavior, set the
-environment variable "COCKROACH_SKIP_ENABLING_DIAGNOSTIC_REPORTING" to true.
`,
Example: ` cockroach demo`,
Args: cobra.NoArgs,
diff --git a/pkg/cli/flags.go b/pkg/cli/flags.go
index 60f6878fa1ba..647419c48e30 100644
--- a/pkg/cli/flags.go
+++ b/pkg/cli/flags.go
@@ -647,7 +647,6 @@ func init() {
if cmd == createClientCertCmd {
cliflagcfg.VarFlag(f, &tenantIDSetter{tenantIDs: &certCtx.tenantScope}, cliflags.TenantScope)
cliflagcfg.VarFlag(f, &tenantNameSetter{tenantNames: &certCtx.tenantNameScope}, cliflags.TenantScopeByNames)
- _ = f.MarkHidden(cliflags.TenantScopeByNames.Name)
// PKCS8 key format is only available for the client cert command.
cliflagcfg.BoolFlag(f, &certCtx.generatePKCS8Key, cliflags.GeneratePKCS8Key)
diff --git a/pkg/cli/testdata/declarative-rules/deprules b/pkg/cli/testdata/declarative-rules/deprules
index 844da777f531..9cf90b386e04 100644
--- a/pkg/cli/testdata/declarative-rules/deprules
+++ b/pkg/cli/testdata/declarative-rules/deprules
@@ -1,6 +1,6 @@
dep
----
-debug declarative-print-rules 1000024.2 dep
+debug declarative-print-rules 24.2 dep
deprules
----
- name: 'CheckConstraint transitions to ABSENT uphold 2-version invariant: PUBLIC->VALIDATED'
diff --git a/pkg/cli/testdata/declarative-rules/invalid_version b/pkg/cli/testdata/declarative-rules/invalid_version
index 136265bf1493..231ea4f78b35 100644
--- a/pkg/cli/testdata/declarative-rules/invalid_version
+++ b/pkg/cli/testdata/declarative-rules/invalid_version
@@ -4,6 +4,5 @@ invalid_version
debug declarative-print-rules 1.1 op
unsupported version number, the supported versions are:
latest
- latest
- 1000024.2
- 1000024.1
+ 24.2
+ 24.1
diff --git a/pkg/cli/testdata/explain-bundle/bundle/env.sql b/pkg/cli/testdata/explain-bundle/bundle/env.sql
index 58a18e550e52..1828b78d7285 100644
--- a/pkg/cli/testdata/explain-bundle/bundle/env.sql
+++ b/pkg/cli/testdata/explain-bundle/bundle/env.sql
@@ -22,7 +22,7 @@
-- bulkio.backup.read_with_priority_after = 1m0s (age of read-as-of time above which a BACKUP should read with priority)
-- bulkio.column_backfill.batch_size = 200 (the number of rows updated at a time to add/remove columns)
-- bulkio.index_backfill.batch_size = 50000 (the number of rows for which we construct index entries in a single batch)
--- bulkio.stream_ingestion.cutover_signal_poll_interval = 30s (the interval at which the stream ingestion job checks if it has been signaled to cutover)
+-- bulkio.stream_ingestion.failover_signal_poll_interval = 30s (the interval at which the stream ingestion job checks if it has been signaled to cutover)
-- bulkio.stream_ingestion.minimum_flush_interval = 5s (the minimum timestamp between flushes; flushes may still occur if internal buffers fill up)
-- changefeed.backfill.concurrent_scan_requests = 0 (number of concurrent scan requests per node issued during a backfill)
-- changefeed.experimental_poll_interval = 1s (polling interval for the table descriptors)
diff --git a/pkg/clusterversion/cockroach_versions.go b/pkg/clusterversion/cockroach_versions.go
index a006e6e01781..cf4e79805de8 100644
--- a/pkg/clusterversion/cockroach_versions.go
+++ b/pkg/clusterversion/cockroach_versions.go
@@ -352,7 +352,7 @@ const V24_3 = Latest
// binary in a dev cluster.
//
// See devOffsetKeyStart for more details.
-const DevelopmentBranch = true
+const DevelopmentBranch = false
// finalVersion should be set on a release branch to the minted final cluster
// version key, e.g. to V23_2 on the release-23.2 branch once it is minted.
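Flipping DevelopmentBranch to false is what removes the development version offset visible in the declarative-rules testdata above: on a development branch the major version is offset by a large constant, so 24.2 prints as 1000024.2. A toy illustration (the offset value is inferred from those printed versions; see devOffsetKeyStart):

    const devOffset = 1000000
    devMajor := 24 + devOffset // printed as "1000024.2" on a dev branch
    _ = devMajor               // a release branch prints plain "24.2"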
diff --git a/pkg/cmd/bazci/bazel-github-helper/main.go b/pkg/cmd/bazci/bazel-github-helper/main.go
index 9e8fd5c77459..268aab5e81a8 100644
--- a/pkg/cmd/bazci/bazel-github-helper/main.go
+++ b/pkg/cmd/bazci/bazel-github-helper/main.go
@@ -24,6 +24,7 @@ import (
"fmt"
"os"
"os/exec"
+ "sort"
"strings"
"github.com/cockroachdb/cockroach/pkg/build/engflow"
@@ -182,6 +183,18 @@ func dumpSummary(f *os.File, invocation *engflow.InvocationInfo) error {
}
}
+ sort.Slice(failedTests, func(i, j int) bool {
+ t1 := failedTests[i]
+ t2 := failedTests[j]
+ if t1.label < t2.label {
+ return true
+ } else if t1.label == t2.label {
+ return t1.name < t2.name
+ } else {
+ return false
+ }
+ })
+
if len(failedTests) != 0 {
_, err := f.WriteString(`| Label | TestName | Status | Link |
| --- | --- | --- | --- |
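The two-key ordering above can also be written in the more common compact form that compares the secondary key only when the primary ties — an equivalent sketch:

    sort.Slice(failedTests, func(i, j int) bool {
        // Order by label first, then by test name within a label.
        if failedTests[i].label != failedTests[j].label {
            return failedTests[i].label < failedTests[j].label
        }
        return failedTests[i].name < failedTests[j].name
    })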
diff --git a/pkg/cmd/dev/doctor.go b/pkg/cmd/dev/doctor.go
index f2f8756f45f0..0095f7e1ba84 100644
--- a/pkg/cmd/dev/doctor.go
+++ b/pkg/cmd/dev/doctor.go
@@ -294,8 +294,8 @@ Make sure one of the following lines is in the file %s/.bazelrc.user:
configured := d.checkUsingConfig(cfg.workspace, "lintonbuild") ||
d.checkUsingConfig(cfg.workspace, "nolintonbuild")
if !configured {
- return "Failed to run `bazel build //build/bazelutil:test_nogo_configured. " + `
-This may be because you haven't configured whether to run lints during builds.
+ return "Failed to find `--config=lintonbuild` or `--config=nolintonbuild` in .bazelrc.user." + `
+
Put EXACTLY ONE of the following lines in your .bazelrc.user:
build --config=lintonbuild
OR
diff --git a/pkg/cmd/drtprod/cli/commands/yamlprocessor.go b/pkg/cmd/drtprod/cli/commands/yamlprocessor.go
index c53fd81a4a71..750ab75c8a1d 100644
--- a/pkg/cmd/drtprod/cli/commands/yamlprocessor.go
+++ b/pkg/cmd/drtprod/cli/commands/yamlprocessor.go
@@ -9,7 +9,6 @@ import (
"context"
"fmt"
"os"
- "strings"
"sync"
"github.com/cockroachdb/cockroach/pkg/cmd/drtprod/helpers"
@@ -18,13 +17,13 @@ import (
)
// commandExecutor is responsible for executing the shell commands
-var commandExecutor = helpers.ExecuteCmdWithPrefix
+var commandExecutor = helpers.ExecuteCmd
// GetYamlProcessor creates a new Cobra command for processing a YAML file.
// The command expects a YAML file as an argument and runs the commands defined in it.
func GetYamlProcessor(ctx context.Context) *cobra.Command {
displayOnly := false
- userProvidedTargetNames := make([]string, 0)
+ targets := make([]string, 0)
cobraCmd := &cobra.Command{
Use: "execute [flags]",
Short: "Executes the commands in sequence as specified in the YAML",
@@ -40,12 +39,12 @@ You can also specify the rollback commands in case of a step failure.
if err != nil {
return err
}
- return processYaml(ctx, yamlContent, displayOnly, userProvidedTargetNames)
+ return processYaml(ctx, yamlContent, displayOnly, targets)
}),
}
cobraCmd.Flags().BoolVarP(&displayOnly,
"display-only", "d", false, "displays the commands that will be executed without running them")
- cobraCmd.Flags().StringArrayVarP(&userProvidedTargetNames,
+ cobraCmd.Flags().StringArrayVarP(&targets,
"targets", "t", nil, "the targets to execute. executes all if not mentioned.")
return cobraCmd
}
@@ -64,10 +63,8 @@ type step struct {
// target defines a target cluster with associated steps to be executed.
type target struct {
- TargetName string `yaml:"target_name"` // Name of the target cluster
- DependentTargets []string `yaml:"dependent_targets"` // targets should complete before starting this target
- Steps []step `yaml:"steps"` // Steps to execute on the target cluster
- commands []*command
+ TargetName string `yaml:"target_name"` // Name of the target cluster
+ Steps []step `yaml:"steps"` // Steps to execute on the target cluster
}
// yamlConfig represents the structure of the entire YAML configuration file.
@@ -95,7 +92,7 @@ func (c *command) String() string {
// processYaml reads the YAML file, parses it, sets the environment variables, and processes the targets.
func processYaml(
- ctx context.Context, yamlContent []byte, displayOnly bool, userProvidedTargetNames []string,
+ ctx context.Context, yamlContent []byte, displayOnly bool, targets []string,
) (err error) {
// Unmarshal the YAML content into the yamlConfig struct
@@ -110,7 +107,7 @@ func processYaml(
}
// Process the targets defined in the YAML
- if err = processTargets(ctx, config.Targets, displayOnly, userProvidedTargetNames); err != nil {
+ if err = processTargets(ctx, config.Targets, displayOnly, targets); err != nil {
return err
}
@@ -138,117 +135,58 @@ func setEnv(environment map[string]string, displayOnly bool) error {
// processTargets processes each target defined in the YAML configuration.
// It generates commands for each target and executes them concurrently.
func processTargets(
- ctx context.Context, targets []target, displayOnly bool, userProvidedTargetNames []string,
+ ctx context.Context, targets []target, displayOnly bool, targetNames []string,
) error {
- // targetNameMap is used to check all targets that are provided as user input
targetNameMap := make(map[string]struct{})
- for _, tn := range userProvidedTargetNames {
+ targetMap := make(map[string][]*command)
+ for _, tn := range targetNames {
targetNameMap[tn] = struct{}{}
}
- waitGroupTracker, err := buildTargetCmdsAndRegisterWaitGroups(targets, targetNameMap, userProvidedTargetNames)
- if err != nil {
- return err
- }
-
- // if displayOnly, we just print and exit
- if displayOnly {
- for _, t := range targets {
- if !shouldSkipTarget(targetNameMap, t, userProvidedTargetNames) {
- displayCommands(t)
- }
- }
- return nil
- }
- // Use a WaitGroup to wait for commands executed concurrently
- wg := sync.WaitGroup{}
- for _, t := range targets {
- if shouldSkipTarget(targetNameMap, t, userProvidedTargetNames) {
- continue
- }
- wg.Add(1)
- go func(t target) {
- // defer complete the wait group for the dependent targets to proceed
- defer waitGroupTracker[t.TargetName].Done()
- defer wg.Done()
- for _, dt := range t.DependentTargets {
- if twg, ok := waitGroupTracker[dt]; ok {
- fmt.Printf("%s: waiting on <%s>\n", t.TargetName, dt)
- // wait on the dependent targets
- // it would not matter if we wait sequentially as all dependent targets need to complete
- twg.Wait()
- }
- }
- err := executeCommands(ctx, t.TargetName, t.commands)
- if err != nil {
- fmt.Printf("%s: Error executing commands: %v\n", t.TargetName, err)
- }
- }(t)
- }
- // final wait for all targets to complete
- wg.Wait()
- return nil
-}
-
-// shouldSkipTarget returns true if the target should be skipped
-func shouldSkipTarget(
- targetNameMap map[string]struct{}, t target, userProvidedTargetNames []string,
-) bool {
- _, ok := targetNameMap[t.TargetName]
- // the targets provided in "--targets" does not contain the current target
- // so, this target is skipped
- return len(userProvidedTargetNames) > 0 && !ok
-}
-
-// buildTargetCmdsAndRegisterWaitGroups builds the commands per target and registers the target to a wait group
-// tracker and returns the same.
-// The wait group tracker is a map of target name to a wait group. A delta is added to the wait group that is
-// marked done when the specific target is complete. The wait group is use by the dependent targets to wait for
-// the completion of the target.
-func buildTargetCmdsAndRegisterWaitGroups(
- targets []target, targetNameMap map[string]struct{}, userProvidedTargetNames []string,
-) (map[string]*sync.WaitGroup, error) {
- // map of target name to a wait group. The wait group is used by dependent target to wait for the target to complete
- waitGroupTracker := make(map[string]*sync.WaitGroup)
-
- // iterate over all the targets and create all the commands that should be executed for the target
for i := 0; i < len(targets); i++ {
- // expand the environment variables
targets[i].TargetName = os.ExpandEnv(targets[i].TargetName)
t := targets[i]
- for j := 0; j < len(t.DependentTargets); j++ {
- targets[i].DependentTargets[j] = os.ExpandEnv(targets[i].DependentTargets[j])
- }
- if shouldSkipTarget(targetNameMap, t, userProvidedTargetNames) {
+ if _, ok := targetNameMap[t.TargetName]; len(targetNames) > 0 && !ok {
fmt.Printf("Ignoring execution for target %s\n", t.TargetName)
continue
}
- // add a delta wait for this target. This is added here so that when the execution loop is run, we need not
- // worry about the sequence
- waitGroupTracker[t.TargetName] = &sync.WaitGroup{}
- waitGroupTracker[t.TargetName].Add(1)
// Generate the commands for each target's steps
targetSteps, err := generateCmdsFromSteps(t.TargetName, t.Steps)
if err != nil {
- return waitGroupTracker, err
+ return err
+ }
+ targetMap[t.TargetName] = targetSteps
+ }
+
+ // Use a WaitGroup to wait for the concurrently executed commands
+ wg := sync.WaitGroup{}
+ for targetName, cmds := range targetMap {
+ if displayOnly {
+ displayCommands(targetName, cmds)
+ continue
}
- targets[i].commands = targetSteps
+ wg.Add(1)
+ go func(tn string, commands []*command) {
+ err := executeCommands(ctx, tn, commands)
+ if err != nil {
+ fmt.Printf("%s: Error executing commands: %v\n", tn, err)
+ }
+ wg.Done()
+ }(targetName, cmds)
}
- return waitGroupTracker, nil
+ wg.Wait()
+ return nil
}
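The rewritten processTargets reduces to a plain fan-out: build a map of target name to commands, launch one goroutine per target, and block on a single WaitGroup, with no dependent-target ordering left. A minimal standalone sketch of that shape, where runTarget and the string commands are hypothetical stand-ins for executeCommands and *command:

package main

import (
	"context"
	"fmt"
	"sync"
)

// runTarget is a hypothetical stand-in for executeCommands.
func runTarget(ctx context.Context, name string, cmds []string) error {
	for _, c := range cmds {
		fmt.Printf("%s: running %s\n", name, c)
	}
	return nil
}

func runAll(ctx context.Context, targets map[string][]string) {
	var wg sync.WaitGroup
	for name, cmds := range targets {
		wg.Add(1)
		go func(tn string, commands []string) {
			defer wg.Done() // release the slot even if runTarget fails
			if err := runTarget(ctx, tn, commands); err != nil {
				fmt.Printf("%s: Error executing commands: %v\n", tn, err)
			}
		}(name, cmds)
	}
	wg.Wait() // block until every target has finished
}

func main() {
	runAll(context.Background(), map[string][]string{
		"drt-test":      {"create", "start"},
		"workload-test": {"create"},
	})
}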
// displayCommands prints the commands to stdout
-func displayCommands(t target) {
- if len(t.DependentTargets) > 0 {
- fmt.Printf("For target <%s> after [%s]:\n", t.TargetName, strings.Join(t.DependentTargets, ", "))
- } else {
- fmt.Printf("For target <%s>:\n", t.TargetName)
- }
- for _, cmd := range t.commands {
+func displayCommands(name string, cmds []*command) {
+ fmt.Printf("For target <%s>:\n", name)
+ for _, cmd := range cmds {
fmt.Printf("|-> %s\n", cmd)
for _, rCmd := range cmd.rollbackCmds {
fmt.Printf(" |-> (Rollback) %s\n", rCmd)
}
}
}
// executeCommands runs the list of commands for a specific target.
diff --git a/pkg/cmd/drtprod/cli/commands/yamlprocessor_test.go b/pkg/cmd/drtprod/cli/commands/yamlprocessor_test.go
index ee1cfa3a6363..5fb3cd1a403e 100644
--- a/pkg/cmd/drtprod/cli/commands/yamlprocessor_test.go
+++ b/pkg/cmd/drtprod/cli/commands/yamlprocessor_test.go
@@ -75,34 +75,17 @@ environment:
t.Run("expect no failure", func(t *testing.T) {
name1Commands := make([]string, 0)
name2Commands := make([]string, 0)
- depN1Commands := make([]string, 0)
- depN1N2Commands := make([]string, 0)
- depNotPresentCommands := make([]string, 0)
commandExecutor = func(ctx context.Context, logPrefix string, cmd string, args ...string) error {
if strings.HasPrefix(logPrefix, "name_value1") {
name1Commands = append(name1Commands, (&command{name: cmd, args: args}).String())
} else if strings.HasPrefix(logPrefix, "name_value2") {
name2Commands = append(name2Commands, (&command{name: cmd, args: args}).String())
- } else if strings.HasPrefix(logPrefix, "dependent_target_n1") {
- // expect that "name_value1" is complete by now
- require.Equal(t, 6, len(name1Commands))
- depN1Commands = append(depN1Commands, (&command{name: cmd, args: args}).String())
- } else if strings.HasPrefix(logPrefix, "dependent_target_n2_n1") {
- // expect that "name_value1" and "name_value2" is complete by now
- require.Equal(t, 6, len(name1Commands))
- require.Equal(t, 1, len(name2Commands))
- depN1N2Commands = append(depN1N2Commands, (&command{name: cmd, args: args}).String())
- } else if strings.HasPrefix(logPrefix, "dependent_target_not_present") {
- depNotPresentCommands = append(depNotPresentCommands, (&command{name: cmd, args: args}).String())
}
return nil
}
require.Nil(t, processYaml(ctx, getTestYaml(), false, nil))
require.Equal(t, 6, len(name1Commands))
require.Equal(t, 1, len(name2Commands))
- require.Equal(t, 1, len(depN1Commands))
- require.Equal(t, 1, len(depN1N2Commands))
- require.Equal(t, 1, len(depNotPresentCommands))
// the flags are maintained as a map and can appear in any sequence
require.True(t, strings.HasPrefix(name1Commands[0], "roachprod dummy1 name_value1 arg11"))
require.True(t, strings.Contains(name1Commands[0], "--clouds=gce"))
@@ -163,31 +146,7 @@ targets:
args:
- $NAME_2
- arg12
- - target_name: dependent_target_n1
- dependent_targets:
- - $NAME_1
- steps:
- - command: dummy2
- args:
- - $NAME_2
- - arg12
- - target_name: dependent_target_n2_n1
- dependent_targets:
- - $NAME_2
- - name_value1
- - name_value1
- steps:
- - command: dummy2
- args:
- - $NAME_2
- - arg12
- - target_name: dependent_target_not_present
- dependent_targets:
- - not_present
- steps:
- - command: dummy2
- args:
- - $NAME_2
- - arg12
+
+
`)
}
diff --git a/pkg/cmd/drtprod/cli/handlers.go b/pkg/cmd/drtprod/cli/handlers.go
index 969392821974..4ae3a0ae11c7 100644
--- a/pkg/cmd/drtprod/cli/handlers.go
+++ b/pkg/cmd/drtprod/cli/handlers.go
@@ -38,7 +38,7 @@ func Initialize(ctx context.Context) {
if err != nil {
if strings.Contains(err.Error(), "unknown command") {
// Command not found, execute it in roachprod instead.
- _ = helpers.ExecuteCmdInteractive(ctx, "roachprod", os.Args[1:]...)
+ _ = helpers.ExecuteCmd(ctx, "roachprod", "roachprod", os.Args[1:]...)
return
}
// If another error occurs, exit with a failure status.
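The fallback above relies on cobra reporting unrecognized subcommands with an error containing "unknown command". A minimal sketch of that dispatch, with execute as a hypothetical stand-in for the drtprod root command and the delegation printed instead of actually spawning roachprod:

package main

import (
	"context"
	"errors"
	"fmt"
	"strings"
)

// execute is a hypothetical stand-in for running the drtprod root command.
func execute(args []string) error {
	return errors.New(`unknown command "sync" for "drtprod"`)
}

// runOrDelegate tries drtprod first and hands unknown commands to roachprod.
func runOrDelegate(ctx context.Context, args []string) {
	if err := execute(args); err != nil {
		if strings.Contains(err.Error(), "unknown command") {
			// Not a drtprod command: pass the args through to roachprod.
			fmt.Printf("delegating to roachprod: %v\n", args)
			return
		}
		fmt.Printf("fatal: %v\n", err)
	}
}

func main() {
	runOrDelegate(context.Background(), []string{"sync"})
}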
diff --git a/pkg/cmd/drtprod/configs/drt_chaos.yaml b/pkg/cmd/drtprod/configs/drt_chaos.yaml
index d61fe04d53bb..a3e62fbb3b90 100644
--- a/pkg/cmd/drtprod/configs/drt_chaos.yaml
+++ b/pkg/cmd/drtprod/configs/drt_chaos.yaml
@@ -87,44 +87,6 @@ targets:
- $WORKLOAD_CLUSTER
- workload
- script: "pkg/cmd/drtprod/scripts/setup_datadog_workload"
- - target_name: post_tasks
- dependent_targets:
- - $CLUSTER
- - $WORKLOAD_CLUSTER
- steps:
- - script: rm
- args:
- - -rf
- - certs-$CLUSTER
- - command: get
- args:
- - $CLUSTER:1
- - certs
- - certs-$CLUSTER
- - command: ssh
- args:
- - $WORKLOAD_CLUSTER
- - --
- - sudo
- - rm
- - -rf
- - certs
- - command: put
- args:
- - $WORKLOAD_CLUSTER
- - certs-$CLUSTER
- - certs
- - command: ssh
- args:
- - $WORKLOAD_CLUSTER
- - --
- - chmod
- - 600
- - './certs/*'
- script: "pkg/cmd/drtprod/scripts/tpcc_init.sh"
- args:
- - cct_tpcc # suffix added to script name tpcc_init_cct_tpcc.sh
- - true # determines whether to execute the script immediately on workload node
flags:
warehouses: 12000
- db: cct_tpcc
diff --git a/pkg/cmd/drtprod/configs/drt_large.yaml b/pkg/cmd/drtprod/configs/drt_large.yaml
index 9cb2613395f5..a6080097bc7e 100644
--- a/pkg/cmd/drtprod/configs/drt_large.yaml
+++ b/pkg/cmd/drtprod/configs/drt_large.yaml
@@ -7,19 +7,6 @@ environment:
ROACHPROD_GCE_DEFAULT_PROJECT: cockroach-drt
CLUSTER: drt-large
WORKLOAD_CLUSTER: workload-large
- STORE_COUNT: 8
-
- # variables used by tpcc_run_multiregion.sh
- NUM_REGIONS: 3
- NODES_PER_REGION: 5
- REGIONS: northamerica-northeast2,us-east5,us-east1
- TPCC_WAREHOUSES: 150000
- DB_NAME: cct_tpcc
- SURVIVAL_GOAL: region
- RUN_DURATION: 12h
- NUM_CONNECTIONS: 500
- NUM_WORKERS: 500
- MAX_RATE: 1000
targets:
- target_name: $CLUSTER
@@ -36,7 +23,7 @@ targets:
nodes: 15
gce-machine-type: n2-standard-16
local-ssd: true
- gce-local-ssd-count: $STORE_COUNT
+ gce-local-ssd-count: 4
os-volume-size: 100
username: drt
lifetime: 8760h
@@ -59,7 +46,7 @@ targets:
- "./cockroach"
flags:
enable-fluent-sink: true
- store-count: $STORE_COUNT
+ store-count: 4
args: --wal-failover=among-stores
restart: false
sql-port: 26257
@@ -92,7 +79,7 @@ targets:
flags:
clouds: gce
gce-zones: "northamerica-northeast2-a,us-east5-a,us-east1-b"
- nodes: $NUM_REGIONS
+ nodes: 3
gce-machine-type: n2d-standard-4
os-volume-size: 100
username: workload
@@ -113,48 +100,6 @@ targets:
- $WORKLOAD_CLUSTER
- workload
- script: "pkg/cmd/drtprod/scripts/setup_datadog_workload"
- - target_name: post_tasks
- dependent_targets:
- - $CLUSTER
- - $WORKLOAD_CLUSTER
- steps:
- - script: rm
- args:
- - -rf
- - certs-$CLUSTER
- - command: get
- args:
- - $CLUSTER:1
- - certs
- - certs-$CLUSTER
- - command: ssh
- args:
- - $WORKLOAD_CLUSTER
- - --
- - sudo
- - rm
- - -rf
- - certs
- - command: put
- args:
- - $WORKLOAD_CLUSTER
- - certs-$CLUSTER
- - certs
- - command: ssh
- args:
- - $WORKLOAD_CLUSTER
- - --
- - chmod
- - 600
- - './certs/*'
- script: "pkg/cmd/drtprod/scripts/tpcc_init.sh"
- args:
- - cct_tpcc # suffix added to script name tpcc_init_cct_tpcc.sh
- - true # determines whether to execute the script immediately on workload node
flags:
- warehouses: $TPCC_WAREHOUSES
- partitions: $NUM_REGIONS
- db: $DB_NAME
- survival-goal: $SURVIVAL_GOAL
- regions: $REGIONS
- - script: "pkg/cmd/drtprod/scripts/tpcc_run_multiregion.sh"
+ warehouses: 15000
diff --git a/pkg/cmd/drtprod/configs/drt_scale.yaml b/pkg/cmd/drtprod/configs/drt_scale.yaml
index 4b52961aa0c7..a2977aa26196 100644
--- a/pkg/cmd/drtprod/configs/drt_scale.yaml
+++ b/pkg/cmd/drtprod/configs/drt_scale.yaml
@@ -8,8 +8,6 @@ environment:
ROACHPROD_GCE_DEFAULT_PROJECT: cockroach-drt
CLUSTER: drt-scale
WORKLOAD_CLUSTER: workload-scale
- CLUSTER_NODES: 150
- WORKLOAD_NODES: 9
targets:
# crdb cluster specs
@@ -23,12 +21,10 @@ targets:
gce-managed: true
gce-enable-multiple-stores: true
gce-zones: "us-central1-a"
- nodes: $CLUSTER_NODES
+ nodes: 150
gce-machine-type: n2-standard-16
- local-ssd: false
- gce-pd-volume-size: 375
- gce-pd-volume-type: pd-ssd
- gce-pd-volume-count: 4
+ local-ssd: true
+ gce-local-ssd-count: 4
os-volume-size: 100
username: drt
lifetime: 8760h
@@ -53,21 +49,23 @@ targets:
args: --wal-failover=among-stores
restart: false
sql-port: 26257
+ on_rollback:
+ - command: stop
+ args:
+ - $CLUSTER
- command: run
args:
- $CLUSTER
- --
- "sudo systemctl unmask cron.service ; sudo systemctl enable cron.service ; echo \"crontab -l ; echo '@reboot sleep 100 && ~/cockroach.sh' | crontab -\" > t.sh ; sh t.sh ; rm t.sh"
# workload cluster specs
- - target_name: $WORKLOAD_CLUSTER
- steps:
- command: create
args:
- $WORKLOAD_CLUSTER
flags:
clouds: gce
gce-zones: "us-central1-a"
- nodes: $WORKLOAD_NODES
+ nodes: 9
gce-machine-type: n2-standard-8
os-volume-size: 100
username: workload
@@ -88,11 +86,6 @@ targets:
- $WORKLOAD_CLUSTER
- workload
- script: "pkg/cmd/drtprod/scripts/setup_datadog_workload"
- - target_name: post_tasks
- dependent_targets:
- - $CLUSTER
- - $WORKLOAD_CLUSTER
- steps:
- script: rm
args:
- -rf
@@ -113,52 +106,23 @@ targets:
- --
- chmod
- 600
- - './certs/*'
+ - certs/*
- command: put
args:
- $WORKLOAD_CLUSTER
- - artifacts/roachprod
+ - bin/roachprod
- roachprod
- command: put
args:
- $WORKLOAD_CLUSTER
- - artifacts/roachtest
+ - bin/roachtest
- roachtest-operations
- - script: "pkg/cmd/drtprod/scripts/tpcc_init.sh"
+ - command: put
args:
- - cct_tpcc_400k # suffix added to script name tpcc_init_cct_tpcc_400k.sh
- - false # determines whether to execute the script immediately on workload node
- flags:
- warehouses: 400000
- db: cct_tpcc
+ - $WORKLOAD_CLUSTER
+ - pkg/cmd/drt/scripts/roachtest_operations_run.sh
+ - roachtest_operations_run.sh
- script: "pkg/cmd/drtprod/scripts/tpcc_init.sh"
- args:
- - cct_tpcc_2000k # suffix added to script name tpcc_init_cct_tpcc_2000k.sh
- - false # determines whether to execute the script immediately on workload node
- flags:
- warehouses: 2000000
- db: cct_tpcc_big
- - script: "pkg/cmd/drtprod/scripts/generate_tpcc_run.sh"
- args:
- - cct_tpcc_400k # suffix added to script name tpcc_run_400k.sh
- - false # determines whether to execute the script immediately on workload node
flags:
+ warehouses: 100000
db: cct_tpcc
- warehouses: 400000
- max-rate: 10000
- workers: 5000
- conns: 5000
- duration: 12h
- ramp: 10m
- wait: 0
- - script: "pkg/cmd/drtprod/scripts/tpch_init.sh"
- args:
- - scale_factor_1000 # suffix added to script name tpch_init_scale_factor_1000.sh
- - false # determines whether to execute the script immediately on workload node
- flags:
- scale-factor: 1000
- - script: "pkg/cmd/drtprod/scripts/generate_tpch_run.sh"
- args:
- - scale_factor_1000
- flags:
- scale-factor: 1000
diff --git a/pkg/cmd/drtprod/configs/drt_scale_operations.yaml b/pkg/cmd/drtprod/configs/drt_scale_operations.yaml
index 412995b46acc..becfa51be963 100644
--- a/pkg/cmd/drtprod/configs/drt_scale_operations.yaml
+++ b/pkg/cmd/drtprod/configs/drt_scale_operations.yaml
@@ -13,5 +13,5 @@ targets:
steps:
- script: "pkg/cmd/drtprod/scripts/create_run_operation.sh"
args:
- - "schema_change,add-column|add-index"
+ - "schema_change,add-column|add-index,0 0 * * *" # runs every day at 12 AM
- "kill_stall,disk-stall|network-partition|node-kill,0 * * * *" # runs every 1 hour
diff --git a/pkg/cmd/drtprod/configs/drt_test.yaml b/pkg/cmd/drtprod/configs/drt_test.yaml
index 7ba2bbeb65d4..6185541a462f 100644
--- a/pkg/cmd/drtprod/configs/drt_test.yaml
+++ b/pkg/cmd/drtprod/configs/drt_test.yaml
@@ -37,25 +37,3 @@ targets:
username: workload
on_rollback:
- command: destroy
- - target_name: post_tasks
- dependent_targets:
- - $CLUSTER
- - $WORKLOAD_CLUSTER
- steps:
- - command: get
- args:
- - $CLUSTER:1
- - certs
- - certs-$CLUSTER
- - command: put
- args:
- - $WORKLOAD_CLUSTER
- - certs-$CLUSTER
- - certs
- - command: ssh
- args:
- - $WORKLOAD_CLUSTER
- - --
- - chmod
- - 600
- - './certs/*'
diff --git a/pkg/cmd/drtprod/helpers/utils.go b/pkg/cmd/drtprod/helpers/utils.go
index 88d77a913626..179ea3ba1a2c 100644
--- a/pkg/cmd/drtprod/helpers/utils.go
+++ b/pkg/cmd/drtprod/helpers/utils.go
@@ -40,9 +40,8 @@ func Wrap(f func(cmd *cobra.Command, args []string) error) func(cmd *cobra.Comma
}
}
-// ExecuteCmdWithPrefix runs a shell command with the given arguments and streams the output.
-// it also adds the specified prefixes
-func ExecuteCmdWithPrefix(ctx context.Context, logPrefix string, cmd string, args ...string) error {
+// ExecuteCmd runs a shell command with the given arguments and streams the output.
+func ExecuteCmd(ctx context.Context, logPrefix string, cmd string, args ...string) error {
// Create a command with the given context and arguments.
c := exec.CommandContext(ctx, cmd, args...)
@@ -56,6 +55,12 @@ func ExecuteCmdWithPrefix(ctx context.Context, logPrefix string, cmd string, arg
return err
}
+ // Start the command execution
+ err = c.Start()
+ if err != nil {
+ return err
+ }
+
// Stream stdout output
outScanner := bufio.NewScanner(stdout)
outScanner.Split(bufio.ScanLines)
@@ -77,17 +82,5 @@ func ExecuteCmdWithPrefix(ctx context.Context, logPrefix string, cmd string, arg
}()
// Wait for the command to complete and return any errors encountered.
- return c.Run()
-}
-
-// ExecuteCmdInteractive runs a shell command with the given arguments and creates an interactive shell.
-func ExecuteCmdInteractive(ctx context.Context, cmd string, args ...string) error {
- // Create a command with the given context and arguments.
- c := exec.CommandContext(ctx, cmd, args...)
-
- // redirect stdin, stdout and stderr
- c.Stdin, c.Stdout, c.Stderr = os.Stdin, os.Stdout, os.Stderr
-
- // Run the command execution
- return c.Run()
+ return c.Wait()
}
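The new Start call matters because Run would block until the process exits, so nothing could drain the pipes while the process produces output; splitting into Start and Wait lets output stream line by line. A self-contained sketch of that pattern (scanning inline rather than in goroutines, which is safe here because Scan returns at EOF before Wait):

package main

import (
	"bufio"
	"context"
	"fmt"
	"os/exec"
)

// stream mirrors ExecuteCmd's Start -> scan -> Wait sequence, prefixing
// each stdout line with a log prefix.
func stream(ctx context.Context, logPrefix, cmd string, args ...string) error {
	c := exec.CommandContext(ctx, cmd, args...)
	stdout, err := c.StdoutPipe()
	if err != nil {
		return err
	}
	// Start, not Run: Run would not return until exit, leaving the
	// pipe undrained while the process is still producing output.
	if err := c.Start(); err != nil {
		return err
	}
	sc := bufio.NewScanner(stdout)
	for sc.Scan() {
		fmt.Printf("[%s] %s\n", logPrefix, sc.Text())
	}
	// Wait reaps the process and surfaces its exit status.
	return c.Wait()
}

func main() {
	_ = stream(context.Background(), "demo", "echo", "hello")
}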
diff --git a/pkg/cmd/drtprod/scripts/create_run_operation.sh b/pkg/cmd/drtprod/scripts/create_run_operation.sh
index b3a54514c207..014100cf5342 100755
--- a/pkg/cmd/drtprod/scripts/create_run_operation.sh
+++ b/pkg/cmd/drtprod/scripts/create_run_operation.sh
@@ -31,9 +31,6 @@ if [ -z "${dd_api_key}" ]; then
exit 1
fi
-# sync cluster is needed for operations
-drtprod ssh ${WORKLOAD_CLUSTER} -- "ROACHPROD_GCE_DEFAULT_PROJECT=${ROACHPROD_GCE_DEFAULT_PROJECT} ./roachprod sync"
-
# the ssh keys of all workload nodes should be setup on the crdb nodes for the operations
roachprod ssh ${CLUSTER} -- "echo \"$(roachprod run ${WORKLOAD_CLUSTER} -- cat ./.ssh/id_rsa.pub|grep ssh-rsa)\" >> ./.ssh/authorized_keys"
diff --git a/pkg/cmd/drtprod/scripts/generate_tpcc_run.sh b/pkg/cmd/drtprod/scripts/generate_tpcc_run.sh
deleted file mode 100755
index 29bc5bd54866..000000000000
--- a/pkg/cmd/drtprod/scripts/generate_tpcc_run.sh
+++ /dev/null
@@ -1,76 +0,0 @@
-#!/bin/bash
-
-# Copyright 2024 The Cockroach Authors.
-#
-# Use of this software is governed by the CockroachDB Software License
-# included in the /LICENSE file.
-
-# This script sets up the tpcc run workload script in the workload nodes
-# The --warehouses flag is passed as argument to this script
-# NOTE - This uses CLUSTER and WORKLOAD_CLUSTER environment variable, if not set the script fails
-if [ "$#" -lt 2 ]; then
- echo "Usage: $0 "
- exit 1
-fi
-suffix=$1
-shift
-# The second argument represents whether the init process should be started in the workload cluster
-# The value is true or false
-if [ "$1" != "true" ] && [ "$1" != "false" ]; then
- # $1 is used again because of the shift
- echo "Error: The second argument must be 'true' or 'false' which implies whether the script should be started in background or not."
- exit 1
-fi
-execute_script=$1
-shift
-
-if [ -z "${CLUSTER}" ]; then
- echo "environment CLUSTER is not set"
- exit 1
-fi
-
-if [ -z "${WORKLOAD_CLUSTER}" ]; then
- echo "environment WORKLOAD_CLUSTER is not set"
- exit 1
-fi
-
-if [ -z "${WORKLOAD_NODES}" ]; then
- echo "environment WORKLOAD_NODES is not set"
- exit 1
-fi
-
-# Prepare PGURLS
-PGURLS=$(roachprod pgurl $CLUSTER | sed s/\'//g)
-
-# Loop through each node
-for NODE in $(seq 1 $WORKLOAD_NODES)
-do
- # Create the workload script
- cat </tmp/tpcc_run_${suffix}.sh
-#!/usr/bin/env bash
-
-read -r -a PGURLS_ARR <<< "$PGURLS"
-
-j=0
-while true; do
- echo ">> Starting tpcc workload"
- ((j++))
- LOG=./tpcc_\$j.txt
- ./cockroach workload run tpcc $@ \
- --tolerate-errors \
- --families \
- "\${PGURLS_ARR[@]}" | tee \$LOG
- if [ \$? -eq 0 ]; then
- rm "\$LOG"
- fi
- sleep 1
-done
-EOF
-
- # Upload the script to the workload cluster
- roachprod put $WORKLOAD_CLUSTER:$NODE /tmp/tpcc_run_${suffix}.sh
- roachprod ssh $WORKLOAD_CLUSTER:$NODE -- "chmod +x tpcc_run_${suffix}.sh"
- if [ "$execute_script" = "true" ]; then
- roachprod run "${WORKLOAD_CLUSTER}":1 -- "sudo systemd-run --unit tpcc_run_${suffix} --same-dir --uid \$(id -u) --gid \$(id -g) bash ${pwd}/tpcc_run_${suffix}.sh"
- fi
-done
diff --git a/pkg/cmd/drtprod/scripts/generate_tpch_run.sh b/pkg/cmd/drtprod/scripts/generate_tpch_run.sh
deleted file mode 100755
index 1a13a0e52c21..000000000000
--- a/pkg/cmd/drtprod/scripts/generate_tpch_run.sh
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/bin/bash
-
-# Copyright 2024 The Cockroach Authors.
-#
-# Use of this software is governed by the CockroachDB Software License
-# included in the /LICENSE file.
-
-# This script sets up the tpch run workload script in the workload nodes
-# The --scale-factor and other flags are passed as argument to this script
-# NOTE - This uses CLUSTER and WORKLOAD_CLUSTER environment variable, if not set the script fails
-if [ "$#" -lt 2 ]; then
- echo "Usage: $0 "
- exit 1
-fi
-suffix=$1
-shift
-
-if [ -z "${CLUSTER}" ]; then
- echo "environment CLUSTER is not set"
- exit 1
-fi
-
-if [ -z "${WORKLOAD_CLUSTER}" ]; then
- echo "environment WORKLOAD_CLUSTER is not set"
- exit 1
-fi
-
-absolute_path=$(roachprod run "${WORKLOAD_CLUSTER}":1 -- "realpath ./tpch_run_${suffix}.sh")
-pwd=$(roachprod run "${WORKLOAD_CLUSTER}":1 -- "dirname ${absolute_path}")
-PGURLS=$(roachprod pgurl "${CLUSTER}":1)
-
-roachprod ssh "${WORKLOAD_CLUSTER}":1 -- "tee tpch_run_${suffix}.sh > /dev/null << 'EOF'
-#!/bin/bash
-
-${pwd}/cockroach workload run tpch $@ --verbose --prometheus-port 2113 $PGURLS
-EOF"
-roachprod ssh "${WORKLOAD_CLUSTER}":1 -- "chmod +x tpch_run_${suffix}.sh"
diff --git a/pkg/cmd/drtprod/scripts/setup_datadog_workload b/pkg/cmd/drtprod/scripts/setup_datadog_workload
index 74e9bb6b11d2..de3db5036edf 100755
--- a/pkg/cmd/drtprod/scripts/setup_datadog_workload
+++ b/pkg/cmd/drtprod/scripts/setup_datadog_workload
@@ -76,7 +76,6 @@ processors:
expressions:
- workload_tpcc.*
- workload_kv_.*
- - workload_tpch.*
# The */datadog elements are defined in the primary configuration file.
service:
diff --git a/pkg/cmd/drtprod/scripts/tpcc_init.sh b/pkg/cmd/drtprod/scripts/tpcc_init.sh
index 55b0d4821ce7..a9122a6c7cf6 100755
--- a/pkg/cmd/drtprod/scripts/tpcc_init.sh
+++ b/pkg/cmd/drtprod/scripts/tpcc_init.sh
@@ -9,23 +9,6 @@
# The --warehouses and other flags for import are passed as argument to this script
# NOTE - This uses CLUSTER and WORKLOAD_CLUSTER environment variable, if not set the script fails
-# The first argument is the name suffix that is added to the script as tpcc_init_.sh
-if [ "$#" -lt 4 ]; then
- echo "Usage: $0 "
- exit 1
-fi
-suffix=$1
-shift
-# The second argument represents whether the init process should be started in the workload cluster
-# The value is true or false
-if [ "$1" != "true" ] && [ "$1" != "false" ]; then
- # $1 is used again because of the shift
- echo "Error: The second argument must be 'true' or 'false' which implies whether the script should be started in background or not."
- exit 1
-fi
-execute_script=$1
-shift
-
if [ -z "${CLUSTER}" ]; then
echo "environment CLUSTER is not set"
exit 1
@@ -36,18 +19,19 @@ if [ -z "${WORKLOAD_CLUSTER}" ]; then
exit 1
fi
-absolute_path=$(roachprod run "${WORKLOAD_CLUSTER}":1 -- "realpath ./tpcc_init_${suffix}.sh")
+absolute_path=$(roachprod run "${WORKLOAD_CLUSTER}":1 -- "realpath ./tpcc_init.sh")
pwd=$(roachprod run "${WORKLOAD_CLUSTER}":1 -- "dirname ${absolute_path}")
-PGURLS=$(roachprod pgurl "${CLUSTER}":1)
# script is responsible for importing the tpcc database for workload
-roachprod ssh "${WORKLOAD_CLUSTER}":1 -- "tee tpcc_init_${suffix}.sh > /dev/null << 'EOF'
+roachprod ssh "${WORKLOAD_CLUSTER}":1 -- "tee tpcc_init.sh > /dev/null << 'EOF'
#!/bin/bash
-${pwd}/cockroach workload fixtures import tpcc $PGURLS $@ --checks=false
+export ROACHPROD_GCE_DEFAULT_PROJECT=${ROACHPROD_GCE_DEFAULT_PROJECT}
+export ROACHPROD_DNS=${ROACHPROD_DNS}
+${pwd}/roachprod sync
+sleep 20
+PGURLS=\$(${pwd}/roachprod pgurl ${CLUSTER} | sed s/\'//g)
+${pwd}/cockroach workload init tpcc $@ --secure --families \$PGURLS
EOF"
-roachprod ssh "${WORKLOAD_CLUSTER}":1 -- "chmod +x tpcc_init_${suffix}.sh"
-
-if [ "$execute_script" = "true" ]; then
- roachprod run "${WORKLOAD_CLUSTER}":1 -- "sudo systemd-run --unit tpcc_init_${suffix} --same-dir --uid \$(id -u) --gid \$(id -g) bash ${pwd}/tpcc_init_${suffix}.sh"
-fi
+roachprod ssh "${WORKLOAD_CLUSTER}":1 -- "chmod +x tpcc_init.sh"
+roachprod run "${WORKLOAD_CLUSTER}":1 -- "sudo systemd-run --unit tpccinit --same-dir --uid \$(id -u) --gid \$(id -g) bash ${pwd}/tpcc_init.sh"
diff --git a/pkg/cmd/drtprod/scripts/tpcc_run_multiregion.sh b/pkg/cmd/drtprod/scripts/tpcc_run_multiregion.sh
deleted file mode 100755
index 0359a0954c06..000000000000
--- a/pkg/cmd/drtprod/scripts/tpcc_run_multiregion.sh
+++ /dev/null
@@ -1,58 +0,0 @@
-#!/bin/bash
-# Copyright 2024 The Cockroach Authors.
-#
-# Use of this software is governed by the CockroachDB Software License
-# included in the /LICENSE file.
-
-# This script sets up the tpcc multiregion run configuration on the workload node.
-
-env_vars=("CLUSTER" "WORKLOAD_CLUSTER" "NUM_REGIONS" "NODES_PER_REGION" "REGIONS" "TPCC_WAREHOUSES" "DB_NAME" "RUN_DURATION" "NUM_CONNECTIONS" "NUM_WORKERS" "MAX_RATE")
-for var in "${env_vars[@]}"; do
- if [ -z "${!var}" ]; then
- echo "$var is not set"
- exit
- fi
-done
-
-for NODE in $(seq 1 $NUM_REGIONS)
-do
- NODE_OFFSET=$(($(($(($NODE - 1))*$NODES_PER_REGION))+1))
- LAST_NODE_IN_REGION=$(($NODE_OFFSET+$NODES_PER_REGION-1))
- # Since we're running a number of workers much smaller than the number of
- # warehouses, we have to do some strange math here. Workers are assigned to
- # warehouses in order (i.e. worker 1 will target warehouse 1). The
- # complication is that when we're partitioning the workload such that workers in
- # region 1 should only target warehouses in region 1, the workload binary will
- # not assign a worker if the warehouse is not in the specified region. As a
- # result, we must pass in a number of workers that is large enough to allow
- # us to reach the specified region, and then add the actual number of workers
- # we want to run.
- EFFECTIVE_NUM_WORKERS=$(($(($TPCC_WAREHOUSES/$NUM_REGIONS))*$(($NODE-1))+$NUM_WORKERS))
- PGURLS_REGION=$(./bin/roachprod pgurl $CLUSTER:$NODE_OFFSET-$LAST_NODE_IN_REGION | sed "s/'//g; s/^/'/; s/$/'/")
- cat </tmp/tpcc_run.sh
-#!/usr/bin/env bash
-j=0
-while true; do
- echo ">> Starting tpcc workload"
- ((j++))
- ./workload run tpcc \
- --db=$DB_NAME \
- --ramp=10m \
- --conns=$NUM_CONNECTIONS \
- --workers=$EFFECTIVE_NUM_WORKERS \
- --warehouses=$TPCC_WAREHOUSES \
- --max-rate=$MAX_RATE \
- --duration=$RUN_DURATION \
- --wait=false \
- --partitions=$NUM_REGIONS \
- --partition-affinity=$(($NODE-1)) \
- --tolerate-errors \
- $PGURLS_REGION \
- --survival-goal region \
- --regions=$REGIONS
-done
-EOF
-
- ./bin/drtprod put $WORKLOAD_CLUSTER:$NODE /tmp/tpcc_run.sh
- ./bin/drtprod ssh $WORKLOAD_CLUSTER:$NODE -- "chmod +x tpcc_run.sh"
-done
diff --git a/pkg/cmd/drtprod/scripts/tpch_init.sh b/pkg/cmd/drtprod/scripts/tpch_init.sh
deleted file mode 100755
index 5aa89fd40996..000000000000
--- a/pkg/cmd/drtprod/scripts/tpch_init.sh
+++ /dev/null
@@ -1,53 +0,0 @@
-#!/bin/bash
-
-# Copyright 2024 The Cockroach Authors.
-#
-# Use of this software is governed by the CockroachDB Software License
-# included in the /LICENSE file.
-
-# This script sets up the tpch import workload script in the workload node
-# The --scale-factor and other flags for import are passed as argument to this script
-# NOTE - This uses CLUSTER and WORKLOAD_CLUSTER environment variable, if not set the script fails
-
-# The first argument is the name suffix that is added to the script as tpch_init_.sh
-if [ "$#" -lt 3 ]; then
- echo "Usage: $0 "
- exit 1
-fi
-suffix=$1
-shift
-# The second argument represents whether the init process should be started in the workload cluster
-# The value is true or false
-if [ "$1" != "true" ] && [ "$1" != "false" ]; then
- # $1 is used again because of the shift
- echo "Error: The second argument must be 'true' or 'false' which implies whether the script should be started in background or not."
- exit 1
-fi
-execute_script=$1
-shift
-
-if [ -z "${CLUSTER}" ]; then
- echo "environment CLUSTER is not set"
- exit 1
-fi
-
-if [ -z "${WORKLOAD_CLUSTER}" ]; then
- echo "environment WORKLOAD_CLUSTER is not set"
- exit 1
-fi
-
-absolute_path=$(roachprod run "${WORKLOAD_CLUSTER}":1 -- "realpath ./tpch_init_${suffix}.sh")
-pwd=$(roachprod run "${WORKLOAD_CLUSTER}":1 -- "dirname ${absolute_path}")
-PGURLS=$(roachprod pgurl "${CLUSTER}":1)
-
-# script is responsible for importing the tpch database for workload
-roachprod ssh "${WORKLOAD_CLUSTER}":1 -- "tee tpch_init_${suffix}.sh > /dev/null << 'EOF'
-#!/bin/bash
-
-${pwd}/cockroach workload init tpch $@ $PGURLS
-EOF"
-roachprod ssh "${WORKLOAD_CLUSTER}":1 -- "chmod +x tpch_init_${suffix}.sh"
-
-if [ "$execute_script" = "true" ]; then
- roachprod run "${WORKLOAD_CLUSTER}":1 -- "sudo systemd-run --unit tpch_init_${suffix} --same-dir --uid \$(id -u) --gid \$(id -g) bash ${pwd}/tpch_init_${suffix}.sh"
-fi
diff --git a/pkg/cmd/roachtest/cluster.go b/pkg/cmd/roachtest/cluster.go
index 4beb5d94325d..e016740e30f0 100644
--- a/pkg/cmd/roachtest/cluster.go
+++ b/pkg/cmd/roachtest/cluster.go
@@ -1865,7 +1865,9 @@ func (c *clusterImpl) doDestroy(ctx context.Context, l *logger.Logger) <-chan st
}
func (c *clusterImpl) addLabels(labels map[string]string) error {
- return roachprod.AddLabels(c.l, c.name, labels)
+ // N.B. we must sanitize the values; e.g., some test names can exceed the maximum length (63 chars in GCE).
+ // N.B. we don't sanitize the keys; unlike values, they are typically _not_ (dynamically) generated.
+ return roachprod.AddLabels(c.l, c.name, vm.SanitizeLabelValues(labels))
}
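addLabels now passes values through vm.SanitizeLabelValues before they reach the provider. A rough approximation of what such sanitization must do, assuming GCE's documented label constraints (lowercase letters, digits, dashes and underscores, at most 63 characters); the real helper's exact rules may differ:

package main

import (
	"fmt"
	"strings"
)

// sanitizeLabelValue is a hypothetical approximation of
// vm.SanitizeLabelValues applied to a single value.
func sanitizeLabelValue(v string) string {
	v = strings.ToLower(v)
	var b strings.Builder
	for _, r := range v {
		switch {
		case r >= 'a' && r <= 'z', r >= '0' && r <= '9', r == '-', r == '_':
			b.WriteRune(r)
		default:
			b.WriteRune('_') // replace anything GCE would reject
		}
	}
	s := b.String()
	if len(s) > 63 { // GCE caps label values at 63 characters
		s = s[:63]
	}
	return s
}

func main() {
	fmt.Println(sanitizeLabelValue("Admission-Control/Very Long Generated Test Name"))
}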
func (c *clusterImpl) removeLabels(labels []string) error {
diff --git a/pkg/cmd/roachtest/operations/BUILD.bazel b/pkg/cmd/roachtest/operations/BUILD.bazel
index 43d1ce7e6b2d..e05eae17d6cf 100644
--- a/pkg/cmd/roachtest/operations/BUILD.bazel
+++ b/pkg/cmd/roachtest/operations/BUILD.bazel
@@ -35,7 +35,6 @@ go_library(
"//pkg/testutils/fingerprintutils",
"//pkg/util/hlc",
"//pkg/util/randutil",
- "//pkg/util/retry",
"//pkg/util/timeutil",
"@com_github_cockroachdb_errors//:errors",
],
diff --git a/pkg/cmd/roachtest/operations/add_column.go b/pkg/cmd/roachtest/operations/add_column.go
index 68d64217bbc5..10027e2d1edf 100644
--- a/pkg/cmd/roachtest/operations/add_column.go
+++ b/pkg/cmd/roachtest/operations/add_column.go
@@ -20,7 +20,6 @@ import (
type cleanupAddedColumn struct {
db, table, column string
- locked bool
}
func (cl *cleanupAddedColumn) Cleanup(
@@ -29,10 +28,6 @@ func (cl *cleanupAddedColumn) Cleanup(
conn := c.Conn(ctx, o.L(), 1, option.VirtualClusterName(roachtestflags.VirtualCluster))
defer conn.Close()
- if cl.locked {
- setSchemaLocked(ctx, o, conn, cl.db, cl.table, false /* lock */)
- defer setSchemaLocked(ctx, o, conn, cl.db, cl.table, true /* lock */)
- }
o.Status(fmt.Sprintf("dropping column %s", cl.column))
_, err := conn.ExecContext(ctx, fmt.Sprintf("ALTER TABLE %s.%s DROP COLUMN %s CASCADE", cl.db, cl.table, cl.column))
if err != nil {
@@ -63,17 +58,6 @@ func runAddColumn(
colQualification += " NOT NULL"
}
- // If the table's schema is locked, then unlock the table and make sure it will
- // be re-locked during cleanup.
- // TODO(#129694): Remove schema unlocking/re-locking once automation is internalized.
- locked := isSchemaLocked(o, conn, dbName, tableName)
- if locked {
- setSchemaLocked(ctx, o, conn, dbName, tableName, false /* lock */)
- // Re-lock the table if necessary, so that it stays locked during any wait
- // period before cleanup.
- defer setSchemaLocked(ctx, o, conn, dbName, tableName, true /* lock */)
- }
-
o.Status(fmt.Sprintf("adding column %s to table %s.%s", colName, dbName, tableName))
addColStmt := fmt.Sprintf("ALTER TABLE %s.%s ADD COLUMN %s VARCHAR %s", dbName, tableName, colName, colQualification)
_, err := conn.ExecContext(ctx, addColStmt)
@@ -82,12 +66,10 @@ func runAddColumn(
}
o.Status(fmt.Sprintf("column %s created", colName))
-
return &cleanupAddedColumn{
db: dbName,
table: tableName,
column: colName,
- locked: locked,
}
}
diff --git a/pkg/cmd/roachtest/operations/add_index.go b/pkg/cmd/roachtest/operations/add_index.go
index e67b8f53772c..72f7957954eb 100644
--- a/pkg/cmd/roachtest/operations/add_index.go
+++ b/pkg/cmd/roachtest/operations/add_index.go
@@ -20,7 +20,6 @@ import (
type cleanupAddedIndex struct {
db, table, index string
- locked bool
}
func (cl *cleanupAddedIndex) Cleanup(
@@ -29,10 +28,6 @@ func (cl *cleanupAddedIndex) Cleanup(
conn := c.Conn(ctx, o.L(), 1, option.VirtualClusterName(roachtestflags.VirtualCluster))
defer conn.Close()
- if cl.locked {
- setSchemaLocked(ctx, o, conn, cl.db, cl.table, false /* lock */)
- defer setSchemaLocked(ctx, o, conn, cl.db, cl.table, true /* lock */)
- }
o.Status(fmt.Sprintf("dropping index %s", cl.index))
_, err := conn.ExecContext(ctx, fmt.Sprintf("DROP INDEX %s.%s@%s", cl.db, cl.table, cl.index))
if err != nil {
@@ -63,15 +58,6 @@ func runAddIndex(
o.Fatal(err)
}
- // If the table's schema is locked, then unlock the table and make sure it will
- // be re-locked during cleanup.
- // TODO(#129694): Remove schema unlocking/re-locking once automation is internalized.
- locked := isSchemaLocked(o, conn, dbName, tableName)
- if locked {
- setSchemaLocked(ctx, o, conn, dbName, tableName, false /* lock */)
- defer setSchemaLocked(ctx, o, conn, dbName, tableName, true /* lock */)
- }
-
indexName := fmt.Sprintf("add_index_op_%d", rng.Uint32())
o.Status(fmt.Sprintf("adding index to column %s in table %s.%s", colName, dbName, tableName))
createIndexStmt := fmt.Sprintf("CREATE INDEX %s ON %s.%s (%s)", indexName, dbName, tableName, colName)
@@ -81,12 +67,10 @@ func runAddIndex(
}
o.Status(fmt.Sprintf("index %s created", indexName))
-
return &cleanupAddedIndex{
- db: dbName,
- table: tableName,
- index: indexName,
- locked: locked,
+ db: dbName,
+ table: tableName,
+ index: indexName,
}
}
diff --git a/pkg/cmd/roachtest/operations/utils.go b/pkg/cmd/roachtest/operations/utils.go
index a927c1d0b760..6b0f99a17ef9 100644
--- a/pkg/cmd/roachtest/operations/utils.go
+++ b/pkg/cmd/roachtest/operations/utils.go
@@ -9,15 +9,12 @@ import (
"context"
gosql "database/sql"
"fmt"
- "strings"
- "time"
"github.com/cockroachdb/cockroach/pkg/cmd/roachtest/cluster"
"github.com/cockroachdb/cockroach/pkg/cmd/roachtest/operation"
"github.com/cockroachdb/cockroach/pkg/cmd/roachtest/option"
"github.com/cockroachdb/cockroach/pkg/cmd/roachtest/roachtestutil"
"github.com/cockroachdb/cockroach/pkg/util/randutil"
- "github.com/cockroachdb/cockroach/pkg/util/retry"
)
// systemDBs lists dbs created by default on a new cockroachdb cluster. These
@@ -113,23 +110,7 @@ func drainNode(
MaybeOption(!c.IsSecure(), "insecure").
Option("self")
- // On the drt-cluster, the drain process has been observed to fail intermittently,
- // causing the node to reject SQL client connections while remaining healthy for other subsystems.
- // To make the node accept SQL connections again, a manual restart is required.
- // To avoid manual intervention, the drain operation will be retried a few times before failing the operation.
- // Once the GitHub issue (https://github.com/cockroachdb/cockroach/issues/130853) is fixed, fallback to c.Run without retries.
- opts := retry.Options{
- InitialBackoff: 1 * time.Second,
- MaxBackoff: 5 * time.Second,
- MaxRetries: 3,
- }
- for r := retry.StartWithCtx(ctx, opts); r.Next(); {
- err = c.RunE(ctx, option.WithNodes(node), cmd.String())
- if err == nil {
- return
- }
- }
- o.Fatalf("drain failed: %v", err)
+ c.Run(ctx, option.WithNodes(node), cmd.String())
}
func decommissionNode(
@@ -174,26 +155,3 @@ func pickRandomStore(ctx context.Context, o operation.Operation, conn *gosql.DB,
}
return stores[rng.Intn(len(stores))]
}
-
-// Returns true if the schema_locked parameter is set on this table.
-func isSchemaLocked(o operation.Operation, conn *gosql.DB, db, tbl string) bool {
- showTblStmt := fmt.Sprintf("SHOW CREATE %s.%s", db, tbl)
- var tblName, createStmt string
- err := conn.QueryRow(showTblStmt).Scan(&tblName, &createStmt)
- if err != nil {
- o.Fatal(err)
- }
- return strings.Contains(createStmt, "schema_locked = true")
-}
-
-// Set the schema_locked storage parameter.
-func setSchemaLocked(
- ctx context.Context, o operation.Operation, conn *gosql.DB, db, tbl string, lock bool,
-) {
- stmt := fmt.Sprintf("ALTER TABLE %s.%s SET (schema_locked=%v)", db, tbl, lock)
- o.Status(fmt.Sprintf("setting schema_locked = %v on table %s.%s", lock, db, tbl))
- _, err := conn.ExecContext(ctx, stmt)
- if err != nil {
- o.Fatal(err)
- }
-}
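The retry loop removed from drainNode above is a bounded-backoff pattern: 1s initial backoff, a 5s cap, and at most 3 retries. The same structure written against the standard library, so it stands alone without pkg/util/retry:

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// retryWithBackoff mirrors the removed drain loop's options: 1s initial
// backoff, capped at 5s, at most 3 retries after the first attempt.
func retryWithBackoff(ctx context.Context, attempt func() error) error {
	backoff := time.Second
	var err error
	for try := 0; ; try++ {
		if err = attempt(); err == nil {
			return nil
		}
		if try == 3 { // MaxRetries reached; give up
			return err
		}
		select {
		case <-time.After(backoff):
		case <-ctx.Done():
			return ctx.Err()
		}
		if backoff *= 2; backoff > 5*time.Second {
			backoff = 5 * time.Second
		}
	}
}

func main() {
	calls := 0
	err := retryWithBackoff(context.Background(), func() error {
		calls++
		if calls < 3 {
			return errors.New("drain rejected")
		}
		return nil
	})
	fmt.Println(calls, err)
}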
diff --git a/pkg/cmd/roachtest/option/options.go b/pkg/cmd/roachtest/option/options.go
index 45074042e043..2cf4b7a43e71 100644
--- a/pkg/cmd/roachtest/option/options.go
+++ b/pkg/cmd/roachtest/option/options.go
@@ -243,15 +243,6 @@ func NoBackupSchedule(opts interface{}) {
}
}
-// DisableWALFailover can be used to generate StartOpts that disable use of WAL
-// failover.
-func DisableWALFailover(opts interface{}) {
- switch opts := opts.(type) {
- case *StartOpts:
- opts.RoachprodOpts.WALFailover = ""
- }
-}
-
// Graceful performs a graceful stop of the cockroach process.
func Graceful(gracePeriodSeconds int) func(interface{}) {
return func(opts interface{}) {
diff --git a/pkg/cmd/roachtest/roachtestflags/flags.go b/pkg/cmd/roachtest/roachtestflags/flags.go
index 5b6d65e1191f..5761e7dc5102 100644
--- a/pkg/cmd/roachtest/roachtestflags/flags.go
+++ b/pkg/cmd/roachtest/roachtestflags/flags.go
@@ -365,7 +365,7 @@ var (
List of =. If a certain version
is present in the list, the respective binary will be used when a
mixed-version test asks for the respective binary, instead of roachprod
- stage . Example: v20.1.4=cockroach-20.1,v20.2.0=cockroach-20.2.`,
+ stage . Example: 20.1.4=cockroach-20.1,20.2.0=cockroach-20.2.`,
})
SlackToken string
diff --git a/pkg/cmd/roachtest/roachtestutil/mixedversion/steps.go b/pkg/cmd/roachtest/roachtestutil/mixedversion/steps.go
index 23c010f5f689..aa73877a4158 100644
--- a/pkg/cmd/roachtest/roachtestutil/mixedversion/steps.go
+++ b/pkg/cmd/roachtest/roachtestutil/mixedversion/steps.go
@@ -636,14 +636,9 @@ func quoteVersionForPresentation(v string) string {
// regular backups as some tests check for running jobs and the
// scheduled backup may make things non-deterministic. In the future,
// we should change the default and add an API for tests to opt-out of
-// the default scheduled backup if necessary. We disable WAL failover
-// because some versions before v24.1 will early exit since they don't
-// understand the `--wal-failover` flag.
+// the default scheduled backup if necessary.
func startOpts(opts ...option.StartStopOption) option.StartOpts {
return option.NewStartOpts(
- append([]option.StartStopOption{
- option.NoBackupSchedule,
- option.DisableWALFailover,
- }, opts...)...,
+ append([]option.StartStopOption{option.NoBackupSchedule}, opts...)...,
)
}
diff --git a/pkg/cmd/roachtest/run.go b/pkg/cmd/roachtest/run.go
index 7537e5ee9991..305187663ab5 100644
--- a/pkg/cmd/roachtest/run.go
+++ b/pkg/cmd/roachtest/run.go
@@ -33,7 +33,6 @@ import (
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/stop"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
- "github.com/cockroachdb/cockroach/pkg/util/version"
"github.com/cockroachdb/errors"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/spf13/cobra"
@@ -289,12 +288,6 @@ func initRunFlagsBinariesAndLibraries(cmd *cobra.Command) error {
if roachtestflags.SelectProbability > 0 && roachtestflags.SelectProbability < 1 {
fmt.Printf("Matching tests will be selected with probability %.2f\n", roachtestflags.SelectProbability)
}
-
- for override := range roachtestflags.VersionsBinaryOverride {
- if _, err := version.Parse(override); err != nil {
- return errors.Wrapf(err, "binary version override %s is not a valid version", override)
- }
- }
return nil
}
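Dropping that validation matches the flag-help change earlier in this patch, where the example override keys lost their leading v (20.1.4=cockroach-20.1). A hedged illustration of the asymmetry, with parse as a stand-in for util/version.Parse on the assumption — implied by the removed check — that it accepts only v-prefixed versions:

package main

import (
	"fmt"
	"regexp"
)

// versionRE approximates the shape util/version.Parse accepts: a "v"
// prefix followed by major.minor.patch.
var versionRE = regexp.MustCompile(`^v(\d+)\.(\d+)\.(\d+)$`)

// parse is a hypothetical stand-in for util/version.Parse.
func parse(s string) error {
	if !versionRE.MatchString(s) {
		return fmt.Errorf("invalid version string %q", s)
	}
	return nil
}

func main() {
	for _, key := range []string{"v20.1.4", "20.1.4"} {
		fmt.Printf("%s -> %v\n", key, parse(key))
	}
}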
diff --git a/pkg/cmd/roachtest/spec/cluster_spec.go b/pkg/cmd/roachtest/spec/cluster_spec.go
index b20a30b00f2e..8f5fec02a99c 100644
--- a/pkg/cmd/roachtest/spec/cluster_spec.go
+++ b/pkg/cmd/roachtest/spec/cluster_spec.go
@@ -125,7 +125,6 @@ type ClusterSpec struct {
MachineType string
MinCPUPlatform string
VolumeType string
- VolumeCount int // volume count is only supported for GCE. This can be moved up if we start supporting other clouds
Zones string
} `cloud:"gce"`
@@ -238,7 +237,6 @@ func getGCEOpts(
minCPUPlatform string,
arch vm.CPUArch,
volumeType string,
- volumeCount int,
useSpot bool,
) vm.ProviderOpts {
opts := gce.DefaultProviderOpts()
@@ -252,9 +250,6 @@ func getGCEOpts(
if volumeSize != 0 {
opts.PDVolumeSize = volumeSize
}
- if volumeCount != 0 {
- opts.PDVolumeCount = volumeCount
- }
opts.SSDCount = localSSDCount
if localSSD && localSSDCount > 0 {
// NB: As the default behavior for _roachprod_ (at least in AWS/GCP) is
@@ -466,11 +461,11 @@ func (s *ClusterSpec) RoachprodOpts(
case GCE:
providerOpts = getGCEOpts(machineType, s.VolumeSize, ssdCount,
createVMOpts.SSDOpts.UseLocalSSD, s.RAID0, s.TerminateOnMigration,
- s.GCE.MinCPUPlatform, vm.ParseArch(createVMOpts.Arch), s.GCE.VolumeType, s.GCE.VolumeCount, s.UseSpotVMs,
+ s.GCE.MinCPUPlatform, vm.ParseArch(createVMOpts.Arch), s.GCE.VolumeType, s.UseSpotVMs,
)
workloadProviderOpts = getGCEOpts(workloadMachineType, s.VolumeSize, ssdCount,
createVMOpts.SSDOpts.UseLocalSSD, s.RAID0, s.TerminateOnMigration,
- s.GCE.MinCPUPlatform, vm.ParseArch(createVMOpts.Arch), s.GCE.VolumeType, s.GCE.VolumeCount, s.UseSpotVMs,
+ s.GCE.MinCPUPlatform, vm.ParseArch(createVMOpts.Arch), s.GCE.VolumeType, s.UseSpotVMs,
)
case Azure:
providerOpts = getAzureOpts(machineType, s.VolumeSize)
diff --git a/pkg/cmd/roachtest/tests/admission_control_latency.go b/pkg/cmd/roachtest/tests/admission_control_latency.go
index 7ab8f7ce39ff..5c7cdc185187 100644
--- a/pkg/cmd/roachtest/tests/admission_control_latency.go
+++ b/pkg/cmd/roachtest/tests/admission_control_latency.go
@@ -976,8 +976,7 @@ func (v variations) startNoBackup(
opts.RoachprodOpts.ExtraArgs = append(opts.RoachprodOpts.ExtraArgs,
fmt.Sprintf("--locality=region=fake-%d", (node-1)/nodesPerRegion))
opts.RoachprodOpts.ExtraArgs = append(opts.RoachprodOpts.ExtraArgs, extraArgs...)
- settings := install.MakeClusterSettings(install.EnvOption([]string{"GODEBUG=gctrace=1"}))
- v.Start(ctx, t.L(), opts, settings, v.Node(node))
+ v.Start(ctx, t.L(), opts, install.MakeClusterSettings(), v.Node(node))
}
}
diff --git a/pkg/cmd/roachtest/tests/backup_fixtures.go b/pkg/cmd/roachtest/tests/backup_fixtures.go
index f526b2903f9b..873a60c05d1a 100644
--- a/pkg/cmd/roachtest/tests/backup_fixtures.go
+++ b/pkg/cmd/roachtest/tests/backup_fixtures.go
@@ -147,7 +147,7 @@ func (bd *backupDriver) prepareCluster(ctx context.Context) {
require.NoError(bd.t, clusterupgrade.StartWithSettings(ctx, bd.t.L(), bd.c,
bd.sp.hardware.getCRDBNodes(),
- option.NewStartOpts(option.NoBackupSchedule, option.DisableWALFailover),
+ option.NewStartOpts(option.NoBackupSchedule),
install.BinaryOption(binaryPath)))
bd.assertCorrectCockroachBinary(ctx)
diff --git a/pkg/cmd/roachtest/tests/cdc.go b/pkg/cmd/roachtest/tests/cdc.go
index aff757f09f3b..4fe7d62b15dd 100644
--- a/pkg/cmd/roachtest/tests/cdc.go
+++ b/pkg/cmd/roachtest/tests/cdc.go
@@ -725,13 +725,14 @@ type opt func(ct *cdcTester)
// N.B. this allocates a workload + sink only node, without it, workload and sink will be on the same node.
func withNumSinkNodes(num int) opt {
return func(ct *cdcTester) {
- ct.workloadNode = ct.cluster.Node(ct.cluster.Spec().NodeCount - num)
- ct.sinkNodes = ct.cluster.Range(ct.cluster.Spec().NodeCount-num+1, ct.cluster.Spec().NodeCount)
+ ct.crdbNodes = ct.cluster.Range(1, ct.cluster.Spec().NodeCount-num-1)
+ ct.workloadNode = ct.cluster.Node(ct.cluster.Spec().NodeCount)
+ ct.sinkNodes = ct.cluster.Range(ct.cluster.Spec().NodeCount-num, ct.cluster.Spec().NodeCount-1)
}
}
func newCDCTester(ctx context.Context, t test.Test, c cluster.Cluster, opts ...opt) cdcTester {
- // By convention the nodes are split up like [crdb..., workload, sink...]. The sink nodes are always the last numSinkNodes ones.
+ // By convention the nodes are split up like [crdb..., sink..., workload].
// N.B.:
// - If it's a single node cluster everything shares node 1.
// - If the sink is not provisioned through the withNumSinkNodes opt, it shares a node with the workload node.
@@ -1663,7 +1664,7 @@ func registerCDC(r registry.Registry) {
Benchmark: true,
Cluster: r.MakeClusterSpec(6, spec.CPU(16)),
Leases: registry.MetamorphicLeases,
- CompatibleClouds: registry.AllExceptAWS,
+ CompatibleClouds: registry.OnlyGCE,
Suites: registry.Suites(registry.Nightly),
RequiresLicense: true,
Run: func(ctx context.Context, t test.Test, c cluster.Cluster) {
@@ -1683,7 +1684,7 @@ func registerCDC(r registry.Registry) {
})
ct.runFeedLatencyVerifier(feed, latencyTargets{
initialScanLatency: 3 * time.Minute,
- steadyLatency: 5 * time.Minute,
+ steadyLatency: 10 * time.Minute,
})
ct.waitForWorkload()
},
@@ -2295,7 +2296,7 @@ func registerCDC(r registry.Registry) {
CompatibleClouds: registry.AllExceptAWS,
Suites: registry.Suites(registry.Nightly),
RequiresLicense: true,
- Timeout: 30 * time.Minute,
+ Timeout: 60 * time.Minute,
Run: func(ctx context.Context, t test.Test, c cluster.Cluster) {
runCDCBank(ctx, t, c)
},
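The withNumSinkNodes arithmetic above encodes the new [crdb..., sink..., workload] convention. The same 1-based index math in isolation, with plain ints standing in for the cluster node-range API:

package main

import "fmt"

// layout computes 1-based node ranges for an n-node cluster with
// numSink dedicated sink nodes, following [crdb..., sink..., workload].
func layout(n, numSink int) (crdbLo, crdbHi, sinkLo, sinkHi, workload int) {
	crdbLo, crdbHi = 1, n-numSink-1 // crdb nodes come first
	sinkLo, sinkHi = n-numSink, n-1 // then the sink nodes
	workload = n                    // workload is always the last node
	return
}

func main() {
	cl, ch, sl, sh, w := layout(6, 2)
	fmt.Printf("crdb=%d-%d sink=%d-%d workload=%d\n", cl, ch, sl, sh, w)
}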
diff --git a/pkg/cmd/roachtest/tests/cdc_bench.go b/pkg/cmd/roachtest/tests/cdc_bench.go
index 5246267d0901..794d5b9b445a 100644
--- a/pkg/cmd/roachtest/tests/cdc_bench.go
+++ b/pkg/cmd/roachtest/tests/cdc_bench.go
@@ -56,17 +56,15 @@ const (
// practice it can.
cdcBenchColdCatchupScan cdcBenchScanType = "catchup-cold"
- cdcBenchNoServer cdcBenchServer = ""
- // The legacy processor was removed in 25.1+. In such
- // timeseries, "processor" refers to the now defunct legacy
- // processor.
+ cdcBenchNoServer cdcBenchServer = ""
+ cdcBenchProcessorServer cdcBenchServer = "processor" // legacy processor
cdcBenchSchedulerServer cdcBenchServer = "scheduler" // new scheduler
)
var (
cdcBenchScanTypes = []cdcBenchScanType{
cdcBenchInitialScan, cdcBenchCatchupScan, cdcBenchColdCatchupScan}
- cdcBenchServers = []cdcBenchServer{cdcBenchSchedulerServer}
+ cdcBenchServers = []cdcBenchServer{cdcBenchProcessorServer, cdcBenchSchedulerServer}
)
func registerCDCBench(r registry.Registry) {
@@ -88,7 +86,7 @@ func registerCDCBench(r registry.Registry) {
Benchmark: true,
Cluster: r.MakeClusterSpec(nodes+1, spec.CPU(cpus)),
CompatibleClouds: registry.AllExceptAWS,
- Suites: registry.Suites(registry.Weekly),
+ Suites: registry.Suites(registry.Nightly),
RequiresLicense: true,
Timeout: 4 * time.Hour, // Allow for the initial import and catchup scans with 100k ranges.
Run: func(ctx context.Context, t test.Test, c cluster.Cluster) {
@@ -116,7 +114,7 @@ func registerCDCBench(r registry.Registry) {
Benchmark: true,
Cluster: r.MakeClusterSpec(nodes+2, spec.CPU(cpus)),
CompatibleClouds: registry.AllExceptAWS,
- Suites: registry.Suites(registry.Weekly),
+ Suites: registry.Suites(registry.Nightly),
RequiresLicense: true,
Timeout: time.Hour,
Run: func(ctx context.Context, t test.Test, c cluster.Cluster) {
@@ -134,7 +132,7 @@ func registerCDCBench(r registry.Registry) {
Benchmark: true,
Cluster: r.MakeClusterSpec(nodes+2, spec.CPU(cpus)),
CompatibleClouds: registry.AllExceptAWS,
- Suites: registry.Suites(registry.Weekly),
+ Suites: registry.Suites(registry.Nightly),
RequiresLicense: true,
Timeout: time.Hour,
Run: func(ctx context.Context, t test.Test, c cluster.Cluster) {
@@ -150,7 +148,7 @@ func registerCDCBench(r registry.Registry) {
Benchmark: true,
Cluster: r.MakeClusterSpec(nodes+3, spec.CPU(cpus)),
CompatibleClouds: registry.AllExceptAWS,
- Suites: registry.Suites(registry.Weekly),
+ Suites: registry.Suites(registry.Nightly),
RequiresLicense: true,
Timeout: time.Hour,
Run: func(ctx context.Context, t test.Test, c cluster.Cluster) {
@@ -418,6 +416,8 @@ func runCDCBenchWorkload(
settings.ClusterSettings["server.child_metrics.enabled"] = "true"
switch server {
+ case cdcBenchProcessorServer:
+ settings.ClusterSettings["kv.rangefeed.scheduler.enabled"] = "false"
case cdcBenchSchedulerServer:
settings.ClusterSettings["kv.rangefeed.scheduler.enabled"] = "true"
case cdcBenchNoServer:
diff --git a/pkg/cmd/roachtest/tests/cluster_to_cluster.go b/pkg/cmd/roachtest/tests/cluster_to_cluster.go
index 2952d64649a3..2c61ade422ae 100644
--- a/pkg/cmd/roachtest/tests/cluster_to_cluster.go
+++ b/pkg/cmd/roachtest/tests/cluster_to_cluster.go
@@ -1644,7 +1644,7 @@ func registerClusterReplicationResilience(r registry.Registry) {
<-shutdownSetupDone
// Eagerly listen to cutover signal to exercise node shutdown during actual cutover.
- rrd.setup.dst.sysSQL.Exec(t, `SET CLUSTER SETTING bulkio.stream_ingestion.cutover_signal_poll_interval='5s'`)
+ rrd.setup.dst.sysSQL.Exec(t, `SET CLUSTER SETTING bulkio.stream_ingestion.failover_signal_poll_interval='5s'`)
// While executing a node shutdown on either the src or destination
// cluster, ensure the destination cluster's stream ingestion job
diff --git a/pkg/cmd/roachtest/tests/disk_stall.go b/pkg/cmd/roachtest/tests/disk_stall.go
index bb881a714b6b..e1b725765f30 100644
--- a/pkg/cmd/roachtest/tests/disk_stall.go
+++ b/pkg/cmd/roachtest/tests/disk_stall.go
@@ -44,12 +44,14 @@ func registerDiskStalledWALFailover(r registry.Registry) {
EncryptionSupport: registry.EncryptionMetamorphic,
Leases: registry.MetamorphicLeases,
Run: func(ctx context.Context, t test.Test, c cluster.Cluster) {
- runDiskStalledWALFailover(ctx, t, c)
+ runDiskStalledWALFailover(ctx, t, c, "among-stores")
},
})
}
-func runDiskStalledWALFailover(ctx context.Context, t test.Test, c cluster.Cluster) {
+func runDiskStalledWALFailover(
+ ctx context.Context, t test.Test, c cluster.Cluster, failoverFlag string,
+) {
startSettings := install.MakeClusterSettings()
// Set a high value for the max sync durations to avoid the disk
// stall detector fataling the node.
@@ -66,8 +68,12 @@ func runDiskStalledWALFailover(ctx context.Context, t test.Test, c cluster.Clust
t.Status("starting cluster")
startOpts := option.DefaultStartOpts()
- startOpts.RoachprodOpts.WALFailover = "among-stores"
- startOpts.RoachprodOpts.StoreCount = 2
+ if failoverFlag == "among-stores" {
+ startOpts.RoachprodOpts.StoreCount = 2
+ }
+ startOpts.RoachprodOpts.ExtraArgs = []string{
+ "--wal-failover=" + failoverFlag,
+ }
c.Start(ctx, t.L(), startOpts, startSettings, c.CRDBNodes())
// Open a SQL connection to n1, the node that will be stalled.
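runDiskStalledWALFailover now threads the failover mode through a --wal-failover flag in ExtraArgs instead of the dedicated RoachprodOpts field, provisioning a second store only for among-stores. A sketch of that branch with stub types in place of the roachtest option structs:

package main

import "fmt"

// startOpts is a stub for option.StartOpts.RoachprodOpts.
type startOpts struct {
	storeCount int
	extraArgs  []string
}

// configure mirrors the new branch: among-stores failover needs two
// stores; whatever the mode, it is passed via the --wal-failover flag.
func configure(failoverFlag string) startOpts {
	opts := startOpts{storeCount: 1}
	if failoverFlag == "among-stores" {
		opts.storeCount = 2
	}
	opts.extraArgs = append(opts.extraArgs, "--wal-failover="+failoverFlag)
	return opts
}

func main() {
	fmt.Printf("%+v\n", configure("among-stores"))
	fmt.Printf("%+v\n", configure("disabled"))
}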
diff --git a/pkg/cmd/roachtest/tests/kv.go b/pkg/cmd/roachtest/tests/kv.go
index 2f2ab06cdd04..c76f1af4c8cb 100644
--- a/pkg/cmd/roachtest/tests/kv.go
+++ b/pkg/cmd/roachtest/tests/kv.go
@@ -60,8 +60,6 @@ func registerKV(r registry.Registry) {
// Set to true to make jemalloc release memory more aggressively to the
// OS, to reduce resident size.
jemallocReleaseFaster bool
- // Set to true to reduce the Pebble block cache from 25% to 20%.
- smallBlockCache bool
}
computeConcurrency := func(opts kvOptions) int {
// Scale the workload concurrency with the number of nodes in the cluster to
@@ -98,9 +96,6 @@ func registerKV(r registry.Registry) {
settings.Env = append(settings.Env,
"MALLOC_CONF=background_thread:true,dirty_decay_ms:2000,muzzy_decay_ms:0")
}
- if opts.smallBlockCache {
- startOpts.RoachprodOpts.ExtraArgs = append(startOpts.RoachprodOpts.ExtraArgs, "--cache=0.20")
- }
c.Start(ctx, t.L(), startOpts, settings, c.CRDBNodes())
db := c.Conn(ctx, t.L(), 1)
@@ -199,11 +194,7 @@ func registerKV(r registry.Registry) {
// from the 80KB size class in jemalloc. So the allocated bytes from jemalloc
// by the block cache are 20% higher than configured. By setting this flag to true,
// we reduce the (resident-allocated) size in jemalloc.
- //
- // Also reduce the Pebble block cache, since we have seen one OOM despite
- // the jemallocReleaseFaster setting.
- {nodes: 1, cpus: 8, readPercent: 0, concMultiplier: 4096, blockSize: 1 << 16, /* 64 KB */
- jemallocReleaseFaster: true, smallBlockCache: true},
+ {nodes: 1, cpus: 8, readPercent: 0, concMultiplier: 4096, blockSize: 1 << 16 /* 64 KB */, jemallocReleaseFaster: true},
{nodes: 1, cpus: 8, readPercent: 95},
{nodes: 1, cpus: 8, readPercent: 95, sharedProcessMT: true},
{nodes: 1, cpus: 32, readPercent: 0},
diff --git a/pkg/cmd/roachtest/tests/mixed_version_backup.go b/pkg/cmd/roachtest/tests/mixed_version_backup.go
index 47dc8628cb3f..f51be5f059ed 100644
--- a/pkg/cmd/roachtest/tests/mixed_version_backup.go
+++ b/pkg/cmd/roachtest/tests/mixed_version_backup.go
@@ -2455,15 +2455,10 @@ func (u *CommonTestUtils) resetCluster(
return fmt.Errorf("failed to wipe cluster: %w", err)
}
- var opts = []option.StartStopOption{option.NoBackupSchedule}
- if !version.AtLeast(clusterupgrade.MustParseVersion("v24.1.0")) {
- opts = append(opts, option.DisableWALFailover)
- }
-
cockroachPath := clusterupgrade.CockroachPathForVersion(u.t, version)
settings = append(settings, install.BinaryOption(cockroachPath), install.SecureOption(true))
return clusterupgrade.StartWithSettings(
- ctx, l, u.cluster, u.roachNodes, option.NewStartOpts(opts...), settings...,
+ ctx, l, u.cluster, u.roachNodes, option.NewStartOpts(option.NoBackupSchedule), settings...,
)
}
diff --git a/pkg/cmd/roachtest/tests/multi_store_remove.go b/pkg/cmd/roachtest/tests/multi_store_remove.go
index 36b4527ab71e..878b9098e756 100644
--- a/pkg/cmd/roachtest/tests/multi_store_remove.go
+++ b/pkg/cmd/roachtest/tests/multi_store_remove.go
@@ -52,11 +52,6 @@ func runMultiStoreRemove(ctx context.Context, t test.Test, c cluster.Cluster) {
t.Status("starting cluster")
startOpts := option.DefaultStartOpts()
startOpts.RoachprodOpts.StoreCount = multiStoreStoresPerNode
- // TODO(jackson): Allow WAL failover to be enabled once it's able to
- // tolerate the removal of a store. Today, the mapping of failover
- // secondaries is fixed, making WAL failover incompatible with the removal
- // of a store.
- startOpts.RoachprodOpts.WALFailover = "disabled"
startSettings := install.MakeClusterSettings()
// Speed up the replicate queue.
startSettings.Env = append(startSettings.Env, "COCKROACH_SCAN_INTERVAL=30s")
diff --git a/pkg/cmd/roachtest/tests/rebalance_load.go b/pkg/cmd/roachtest/tests/rebalance_load.go
index a40d98ea318a..d0fbc8e630ba 100644
--- a/pkg/cmd/roachtest/tests/rebalance_load.go
+++ b/pkg/cmd/roachtest/tests/rebalance_load.go
@@ -344,7 +344,7 @@ func makeStoreCPUFn(
tsQueries := make([]tsQuery, numNodes)
for i := range tsQueries {
tsQueries[i] = tsQuery{
- name: "cr.node.sys.cpu.combined.percent-normalized",
+ name: "cr.node.sys.cpu.host.combined.percent-normalized",
queryType: total,
sources: []string{fmt.Sprintf("%d", i+1)},
tenantID: roachpb.SystemTenantID,
@@ -376,8 +376,8 @@ func makeStoreCPUFn(
// as much to avoid any surprises.
if cpu < 0 || cpu > 1 {
return nil, errors.Newf(
- "node %d has core count normalized CPU utilization ts datapoint "+
- "not in [0%,100%] (impossible!): %f [resp=%+v]", node, cpu, resp)
+ "node idx %d has core count normalized CPU utilization ts datapoint "+
+ "not in [0\\%,100\\%] (impossible!): %v [resp=%+v]", node, cpu, resp)
}
nodeIdx := node * storesPerNode
diff --git a/pkg/cmd/roachtest/tests/util.go b/pkg/cmd/roachtest/tests/util.go
index e7bb108becf9..a3fe0a52d4cd 100644
--- a/pkg/cmd/roachtest/tests/util.go
+++ b/pkg/cmd/roachtest/tests/util.go
@@ -6,8 +6,6 @@
package tests
import (
- "archive/zip"
- "bytes"
"context"
"fmt"
"io"
@@ -15,7 +13,6 @@ import (
"os"
"path/filepath"
"regexp"
- "strings"
"time"
"github.com/cockroachdb/cockroach/pkg/cmd/roachtest/cluster"
@@ -236,70 +233,15 @@ func downloadProfiles(
return err
}
url := urlPrefix + diagID
- resp, err := client.Get(context.Background(), url)
- if err != nil {
- return err
- }
- defer resp.Body.Close()
- // Copy the contents of the URL to a BytesBuffer to determine the
- // filename before saving it below.
- var buf bytes.Buffer
- _, err = io.Copy(&buf, resp.Body)
- if err != nil {
- return err
- }
- filename, err := getFilename(collectedAt, buf)
- if err != nil {
- return err
- }
- // write the buf to the filename
- file, err := os.Create(filepath.Join(stmtDir, filename))
- if err != nil {
+ filename := fmt.Sprintf("%s-%s.zip", collectedAt.Format("2006-01-02T15_04_05Z07:00"), diagID)
+ logger.Printf("downloading profile %s", filename)
+ if err := client.Download(ctx, url, filepath.Join(stmtDir, filename)); err != nil {
return err
}
- if _, err := io.Copy(file, &buf); err != nil {
- return err
- }
- logger.Printf("downloaded profile %s", filename)
}
return nil
}
-// getFilename creates a file name for the profile based on the traced operation
-// and duration. An example filename is
-// 2024-10-24T18_23_57Z-UPSERT-101.490ms.zip.
-func getFilename(collectedAt time.Time, buf bytes.Buffer) (string, error) {
- // Download the zip to a BytesBuffer.
- unzip, err := zip.NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
- if err != nil {
- return "", err
- }
- // NB: The format of the trace.txt file is not guaranteed to be stable. If
- // this proves problematic we could parse the trace.json instead. Parsing
- // the trace.txt is easier due to the nested structure of the trace.json.
- r, err := unzip.Open("trace.txt")
- if err != nil {
- return "", err
- }
- bytes, err := io.ReadAll(r)
- if err != nil {
- return "", err
- }
- if err = r.Close(); err != nil {
- return "", err
- }
- lines := strings.Split(string(bytes), "\n")
- // The first line is the SQL statement. An example is `UPSERT INTO kv (k, v)
- // VALUES ($1, $2)`. We only grab the operation to help differentiate
- // traces. An alternative if this isn't differentiated enough is to use the
- // entire fingerprint text, however that creates longs and complex
- // filenames.
- operation := strings.Split(strings.TrimSpace(lines[0]), " ")[0]
- // Use the second to last line because the last line is empty.
- duration := strings.Split(strings.TrimSpace(lines[len(lines)-2]), " ")[0]
- return fmt.Sprintf("%s-%s-%s.zip", collectedAt.Format("2006-01-02T15_04_05Z07:00"), operation, duration), nil
-}
-
type IP struct {
Query string
}
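With getFilename gone, profile names are derived only from the collection timestamp and diagnostics ID before the URL is handed to client.Download. A minimal sketch of that naming; the timestamp layout is the one used above, the rest is illustrative:

package main

import (
	"fmt"
	"path/filepath"
	"time"
)

// profilePath builds the on-disk name for a downloaded statement
// diagnostics bundle: <collection time>-<diagnostic ID>.zip.
func profilePath(dir string, collectedAt time.Time, diagID string) string {
	filename := fmt.Sprintf("%s-%s.zip",
		collectedAt.Format("2006-01-02T15_04_05Z07:00"), diagID)
	return filepath.Join(dir, filename)
}

func main() {
	ts := time.Date(2024, 10, 24, 18, 23, 57, 0, time.UTC)
	fmt.Println(profilePath("stmt-diag", ts, "1012345678901234567"))
}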
diff --git a/pkg/gen/protobuf.bzl b/pkg/gen/protobuf.bzl
index 3f6cde8019b0..798f02f9efa1 100644
--- a/pkg/gen/protobuf.bzl
+++ b/pkg/gen/protobuf.bzl
@@ -90,7 +90,6 @@ PROTOBUF_SRCS = [
"//pkg/util/timeutil/pgdate:pgdate_go_proto",
"//pkg/util/tracing/tracingpb:tracingpb_go_proto",
"//pkg/util/tracing/tracingservicepb:tracingservicepb_go_proto",
- "//pkg/util/vector:vector_go_proto",
"//pkg/util:util_go_proto",
"//pkg/workload/histogram:histogram_go_proto",
]
diff --git a/pkg/jobs/jobs_test.go b/pkg/jobs/jobs_test.go
index cfccdad4178d..94b829b31556 100644
--- a/pkg/jobs/jobs_test.go
+++ b/pkg/jobs/jobs_test.go
@@ -2377,7 +2377,7 @@ func TestJobInTxn(t *testing.T) {
txn, err := sqlDB.Begin()
require.NoError(t, err)
- _, err = txn.Exec("BACKUP tobeaborted INTO doesnotmattter")
+ _, err = txn.Exec("BACKUP tobeaborted TO doesnotmattter")
require.NoError(t, err)
// If we rollback then the job should not run
@@ -2402,7 +2402,7 @@ func TestJobInTxn(t *testing.T) {
// Now let's actually commit the transaction and check that the job ran.
txn, err := sqlDB.Begin()
require.NoError(t, err)
- _, err = txn.Exec("BACKUP tocommit INTO foo")
+ _, err = txn.Exec("BACKUP tocommit TO foo")
require.NoError(t, err)
// Committing will block and wait for all jobs to run.
require.NoError(t, txn.Commit())
@@ -2424,10 +2424,10 @@ func TestJobInTxn(t *testing.T) {
require.NoError(t, err)
// Add a succeeding job.
- _, err = txn.Exec("BACKUP doesnotmatter INTO doesnotmattter")
+ _, err = txn.Exec("BACKUP doesnotmatter TO doesnotmattter")
require.NoError(t, err)
// We hooked up a failing test job to RESTORE.
- _, err = txn.Exec("RESTORE TABLE tbl FROM LATEST IN somewhere")
+ _, err = txn.Exec("RESTORE TABLE tbl FROM somewhere")
require.NoError(t, err)
// Now let's actually commit the transaction and check that there is a
diff --git a/pkg/jobs/jobspb/wrap.go b/pkg/jobs/jobspb/wrap.go
index 861a783b401e..34b19d5aa83f 100644
--- a/pkg/jobs/jobspb/wrap.go
+++ b/pkg/jobs/jobspb/wrap.go
@@ -102,13 +102,13 @@ var _ base.SQLInstanceID
type ReplicationStatus uint8
const (
- InitializingReplication ReplicationStatus = 0
- CreatingInitialSplits ReplicationStatus = 6
- Replicating ReplicationStatus = 1
- ReplicationPaused ReplicationStatus = 2
- ReplicationPendingCutover ReplicationStatus = 3
- ReplicationCuttingOver ReplicationStatus = 4
- ReplicationError ReplicationStatus = 5
+ InitializingReplication ReplicationStatus = 0
+ CreatingInitialSplits ReplicationStatus = 6
+ Replicating ReplicationStatus = 1
+ ReplicationPaused ReplicationStatus = 2
+ ReplicationPendingFailover ReplicationStatus = 3
+ ReplicationFailingOver ReplicationStatus = 4
+ ReplicationError ReplicationStatus = 5
)
// String implements fmt.Stringer.
@@ -120,10 +120,10 @@ func (rs ReplicationStatus) String() string {
return "replicating"
case ReplicationPaused:
return "replication paused"
- case ReplicationPendingCutover:
- return "replication pending cutover"
- case ReplicationCuttingOver:
- return "replication cutting over"
+ case ReplicationPendingFailover:
+ return "replication pending failover"
+ case ReplicationFailingOver:
+ return "replication failing over"
case ReplicationError:
return "replication error"
case CreatingInitialSplits:
diff --git a/pkg/kv/kvclient/kvcoord/dist_sender_server_test.go b/pkg/kv/kvclient/kvcoord/dist_sender_server_test.go
index c38c36264fe6..d83149539f03 100644
--- a/pkg/kv/kvclient/kvcoord/dist_sender_server_test.go
+++ b/pkg/kv/kvclient/kvcoord/dist_sender_server_test.go
@@ -4698,16 +4698,26 @@ func TestPartialPartition(t *testing.T) {
{false, 3, [][2]roachpb.NodeID{{1, 2}}},
}
for _, test := range testCases {
- t.Run(fmt.Sprintf("%t-%d", test.useProxy, test.numServers),
- func(t *testing.T) {
+ t.Run(fmt.Sprintf("%t-%d", test.useProxy, test.numServers), func(t *testing.T) {
+ testutils.RunValues(t, "lease-type", roachpb.LeaseTypes(), func(t *testing.T, leaseType roachpb.LeaseType) {
st := cluster.MakeTestingClusterSettings()
kvcoord.ProxyBatchRequest.Override(ctx, &st.SV, test.useProxy)
- // With epoch leases this test doesn't work reliably. It passes
- // in cases where it should fail and fails in cases where it
- // should pass.
- // TODO(baptist): Attempt to pin the liveness leaseholder to
- // node 3 to make epoch leases reliable.
- kvserver.ExpirationLeasesOnly.Override(ctx, &st.SV, true)
+ switch leaseType {
+ case roachpb.LeaseExpiration:
+ kvserver.ExpirationLeasesOnly.Override(ctx, &st.SV, true)
+ case roachpb.LeaseEpoch:
+ // With epoch leases this test doesn't work reliably. It passes
+ // in cases where it should fail and fails in cases where it
+ // should pass.
+ // TODO(baptist): Attempt to pin the liveness leaseholder to
+ // node 3 to make epoch leases reliable.
+ skip.IgnoreLint(t, "flaky with epoch leases")
+ case roachpb.LeaseLeader:
+ kvserver.ExpirationLeasesOnly.Override(ctx, &st.SV, false)
+ kvserver.RaftLeaderFortificationFractionEnabled.Override(ctx, &st.SV, 1.0)
+ default:
+ t.Fatalf("unknown lease type %s", leaseType)
+ }
kvserver.RangefeedEnabled.Override(ctx, &st.SV, true)
kvserver.RangeFeedRefreshInterval.Override(ctx, &st.SV, 10*time.Millisecond)
closedts.TargetDuration.Override(ctx, &st.SV, 10*time.Millisecond)
@@ -4801,6 +4811,7 @@ func TestPartialPartition(t *testing.T) {
tc.Stopper().Stop(ctx)
})
+ })
}
}
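
The hunk above fans TestPartialPartition out over every lease type via testutils.RunValues, with the switch configuring expiration and leader leases and skipping the flaky epoch variant. A helper of that shape is just t.Run over a slice of values; a sketch, under the assumption that the real testutils helper's signature may differ:

    package testsketch

    import (
        "fmt"
        "testing"
    )

    // runValues runs fn once per value as a subtest named "<name>=<value>",
    // mirroring the RunValues-style helper used in the diff (a sketch; the
    // real helper's signature may differ).
    func runValues[T any](t *testing.T, name string, values []T, fn func(*testing.T, T)) {
        for _, v := range values {
            v := v // capture for the closure (pre-Go 1.22 habit)
            t.Run(fmt.Sprintf("%s=%v", name, v), func(t *testing.T) {
                fn(t, v)
            })
        }
    }
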
@@ -4812,104 +4823,121 @@ func TestProxyTracing(t *testing.T) {
defer log.Scope(t).Close(t)
ctx := context.Background()
- const numServers = 3
- const numRanges = 3
- st := cluster.MakeTestingClusterSettings()
- kvserver.ExpirationLeasesOnly.Override(ctx, &st.SV, true)
- kvserver.RangefeedEnabled.Override(ctx, &st.SV, true)
- kvserver.RangeFeedRefreshInterval.Override(ctx, &st.SV, 10*time.Millisecond)
- closedts.TargetDuration.Override(ctx, &st.SV, 10*time.Millisecond)
- closedts.SideTransportCloseInterval.Override(ctx, &st.SV, 10*time.Millisecond)
-
- var p rpc.Partitioner
- tc := testcluster.StartTestCluster(t, numServers, base.TestClusterArgs{
- ServerArgsPerNode: func() map[int]base.TestServerArgs {
- perNode := make(map[int]base.TestServerArgs)
- for i := 0; i < numServers; i++ {
- ctk := rpc.ContextTestingKnobs{}
- // Partition between n1 and n3.
- p.RegisterTestingKnobs(roachpb.NodeID(i+1), [][2]roachpb.NodeID{{1, 3}}, &ctk)
- perNode[i] = base.TestServerArgs{
- Settings: st,
- Knobs: base.TestingKnobs{
- Server: &server.TestingKnobs{
- ContextTestingKnobs: ctk,
+ testutils.RunValues(t, "lease-type", roachpb.LeaseTypes(), func(t *testing.T, leaseType roachpb.LeaseType) {
+ const numServers = 3
+ const numRanges = 3
+ st := cluster.MakeTestingClusterSettings()
+ switch leaseType {
+ case roachpb.LeaseExpiration:
+ kvserver.ExpirationLeasesOnly.Override(ctx, &st.SV, true)
+ case roachpb.LeaseEpoch:
+ // With epoch leases this test doesn't work reliably. It passes
+ // in cases where it should fail and fails in cases where it
+ // should pass.
+ // TODO(baptist): Attempt to pin the liveness leaseholder to
+ // node 3 to make epoch leases reliable.
+ skip.IgnoreLint(t, "flaky with epoch leases")
+ case roachpb.LeaseLeader:
+ kvserver.ExpirationLeasesOnly.Override(ctx, &st.SV, false)
+ kvserver.RaftLeaderFortificationFractionEnabled.Override(ctx, &st.SV, 1.0)
+ default:
+ t.Fatalf("unknown lease type %s", leaseType)
+ }
+ kvserver.RangefeedEnabled.Override(ctx, &st.SV, true)
+ kvserver.RangeFeedRefreshInterval.Override(ctx, &st.SV, 10*time.Millisecond)
+ closedts.TargetDuration.Override(ctx, &st.SV, 10*time.Millisecond)
+ closedts.SideTransportCloseInterval.Override(ctx, &st.SV, 10*time.Millisecond)
+
+ var p rpc.Partitioner
+ tc := testcluster.StartTestCluster(t, numServers, base.TestClusterArgs{
+ ServerArgsPerNode: func() map[int]base.TestServerArgs {
+ perNode := make(map[int]base.TestServerArgs)
+ for i := 0; i < numServers; i++ {
+ ctk := rpc.ContextTestingKnobs{}
+ // Partition between n1 and n3.
+ p.RegisterTestingKnobs(roachpb.NodeID(i+1), [][2]roachpb.NodeID{{1, 3}}, &ctk)
+ perNode[i] = base.TestServerArgs{
+ Settings: st,
+ Knobs: base.TestingKnobs{
+ Server: &server.TestingKnobs{
+ ContextTestingKnobs: ctk,
+ },
},
- },
+ }
}
- }
- return perNode
- }(),
- })
- defer tc.Stopper().Stop(ctx)
-
- // Set up the mapping after the nodes have started and we have their
- // addresses.
- for i := 0; i < numServers; i++ {
- g := tc.Servers[i].StorageLayer().GossipI().(*gossip.Gossip)
- addr := g.GetNodeAddr().String()
- nodeID := g.NodeID.Get()
- p.RegisterNodeAddr(addr, nodeID)
- }
-
- conn := tc.Conns[0]
+ return perNode
+ }(),
+ })
+ defer tc.Stopper().Stop(ctx)
+
+ // Set up the mapping after the nodes have started and we have their
+ // addresses.
+ for i := 0; i < numServers; i++ {
+ g := tc.Servers[i].StorageLayer().GossipI().(*gossip.Gossip)
+ addr := g.GetNodeAddr().String()
+ nodeID := g.NodeID.Get()
+ p.RegisterNodeAddr(addr, nodeID)
+ }
- // Create a table and pin the leaseholder replicas to n3. The partition
- // between n1 and n3 will lead to re-routing via n2, which we expect captured
- // in the trace.
- _, err := conn.Exec("CREATE TABLE t (i INT)")
- require.NoError(t, err)
- _, err = conn.Exec("ALTER TABLE t CONFIGURE ZONE USING num_replicas=3, lease_preferences='[[+dc=dc3]]', constraints='[]'")
- require.NoError(t, err)
- _, err = conn.Exec(
- fmt.Sprintf("INSERT INTO t(i) select generate_series(1,%d)", numRanges-1))
- require.NoError(t, err)
- _, err = conn.Exec("ALTER TABLE t SPLIT AT SELECT i FROM t")
- require.NoError(t, err)
- require.NoError(t, tc.WaitForFullReplication())
+ conn := tc.Conns[0]
- leaseCount := func(node int) int {
- var count int
- err := conn.QueryRow(fmt.Sprintf(
- "SELECT count(*) FROM [SHOW RANGES FROM TABLE t WITH DETAILS] WHERE lease_holder = %d", node),
- ).Scan(&count)
+ // Create a table and pin the leaseholder replicas to n3. The partition
+	// between n1 and n3 will lead to re-routing via n2, which we expect to be
+	// captured in the trace.
+ _, err := conn.Exec("CREATE TABLE t (i INT)")
require.NoError(t, err)
- return count
- }
+ _, err = conn.Exec("ALTER TABLE t CONFIGURE ZONE USING num_replicas=3, lease_preferences='[[+dc=dc3]]', constraints='[]'")
+ require.NoError(t, err)
+ _, err = conn.Exec(
+ fmt.Sprintf("INSERT INTO t(i) select generate_series(1,%d)", numRanges-1))
+ require.NoError(t, err)
+ _, err = conn.Exec("ALTER TABLE t SPLIT AT SELECT i FROM t")
+ require.NoError(t, err)
+ require.NoError(t, tc.WaitForFullReplication())
- checkLeaseCount := func(node, expectedLeaseCount int) error {
- if count := leaseCount(node); count != expectedLeaseCount {
- require.NoError(t, tc.GetFirstStoreFromServer(t, 0).
- ForceLeaseQueueProcess())
- return errors.Errorf("expected %d leases on node %d, found %d",
- expectedLeaseCount, node, count)
+ leaseCount := func(node int) int {
+ var count int
+ err := conn.QueryRow(fmt.Sprintf(
+ "SELECT count(*) FROM [SHOW RANGES FROM TABLE t WITH DETAILS] WHERE lease_holder = %d", node),
+ ).Scan(&count)
+ require.NoError(t, err)
+ return count
}
- return nil
- }
- // Wait until the leaseholder for the test table ranges are on n3.
- testutils.SucceedsSoon(t, func() error {
- return checkLeaseCount(3, numRanges)
- })
+ checkLeaseCount := func(node, expectedLeaseCount int) error {
+ if count := leaseCount(node); count != expectedLeaseCount {
+ require.NoError(t, tc.GetFirstStoreFromServer(t, 0).
+ ForceLeaseQueueProcess())
+ return errors.Errorf("expected %d leases on node %d, found %d",
+ expectedLeaseCount, node, count)
+ }
+ return nil
+ }
- p.EnablePartition(true)
+	// Wait until the leaseholders for the test table ranges are on n3.
+ testutils.SucceedsSoon(t, func() error {
+ return checkLeaseCount(3, numRanges)
+ })
- _, err = conn.Exec("SET TRACING = on; SELECT FROM t where i = 987654321; SET TRACING = off")
- require.NoError(t, err)
+ p.EnablePartition(true)
- // Expect the "proxy request complete" message to be in the trace and that it
- // comes from the proxy node n2.
- var msg, tag, loc string
- if err = conn.QueryRowContext(ctx, `SELECT message, tag, location
- FROM [SHOW TRACE FOR SESSION]
- WHERE message LIKE '%proxy request complete%'
- AND location LIKE '%server/node%'
- AND tag LIKE '%n2%'`,
- ).Scan(&msg, &tag, &loc); err != nil {
- if errors.Is(err, gosql.ErrNoRows) {
- t.Fatalf("request succeeded without proxying")
+ _, err = conn.Exec("SET TRACING = on; SELECT FROM t where i = 987654321; SET TRACING = off")
+ require.NoError(t, err)
+
+	// Expect the trace to contain the "proxy request complete" message, and
+	// for it to come from the proxy node n2.
+ var msg, tag, loc string
+ if err = conn.QueryRowContext(ctx, `SELECT message, tag, location
+ FROM [SHOW TRACE FOR SESSION]
+ WHERE message LIKE '%proxy request complete%'
+ AND location LIKE '%server/node%'
+ AND tag LIKE '%n2%'`,
+ ).Scan(&msg, &tag, &loc); err != nil {
+ if errors.Is(err, gosql.ErrNoRows) {
+ t.Fatalf("request succeeded without proxying")
+ }
+ t.Fatal(err)
}
- t.Fatal(err)
- }
- t.Logf("found trace event; msg=%s, tag=%s, loc=%s", msg, tag, loc)
+ t.Logf("found trace event; msg=%s, tag=%s, loc=%s", msg, tag, loc)
+ })
}
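
TestProxyTracing's final assertion treats sql.ErrNoRows as its own failure mode: an absent trace row means the request was never proxied through n2 at all. A condensed sketch of that pattern (query text follows the test; the helper name is hypothetical):

    package tracesketch

    import (
        "context"
        "database/sql"
        "errors"
        "fmt"
    )

    // expectProxyTrace scans for the single trace row the test requires and
    // maps sql.ErrNoRows to a dedicated failure: the request succeeded
    // without ever being proxied through n2.
    func expectProxyTrace(ctx context.Context, db *sql.DB) error {
        var msg, tag, loc string
        err := db.QueryRowContext(ctx, `SELECT message, tag, location
            FROM [SHOW TRACE FOR SESSION]
            WHERE message LIKE '%proxy request complete%'
              AND tag LIKE '%n2%'`).Scan(&msg, &tag, &loc)
        if errors.Is(err, sql.ErrNoRows) {
            return fmt.Errorf("request succeeded without proxying")
        }
        return err
    }
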
diff --git a/pkg/kv/kvclient/rangefeed/rangefeed_external_test.go b/pkg/kv/kvclient/rangefeed/rangefeed_external_test.go
index 3adfa02f025a..44a18c3e6453 100644
--- a/pkg/kv/kvclient/rangefeed/rangefeed_external_test.go
+++ b/pkg/kv/kvclient/rangefeed/rangefeed_external_test.go
@@ -56,17 +56,20 @@ var (
type kvs = storageutils.KVs
-type rangefeedTestType struct {
- useBufferedSender bool
+type feedProcessorType struct {
+ useScheduler bool
}
-func (t rangefeedTestType) String() string {
- return fmt.Sprintf("mux/buffered_sender=%t", t.useBufferedSender)
+func (t feedProcessorType) String() string {
+ return fmt.Sprintf("mux/scheduler=%t", t.useScheduler)
}
-var feedTypes = []rangefeedTestType{
+var procTypes = []feedProcessorType{
{
- useBufferedSender: false,
+ useScheduler: false,
+ },
+ {
+ useScheduler: true,
},
}
@@ -76,13 +79,13 @@ func TestRangeFeedIntegration(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
- testutils.RunValues(t, "feed type", feedTypes, func(t *testing.T, rt rangefeedTestType) {
+ testutils.RunValues(t, "feed_type", procTypes, func(t *testing.T, s feedProcessorType) {
ctx := context.Background()
settings := cluster.MakeTestingClusterSettings()
	// We must enable the desired scheduler settings before we start the
	// cluster; otherwise we will trigger processor restarts later, and this
	// test can't handle duplicated events.
- kvserver.RangefeedUseBufferedSender.Override(ctx, &settings.SV, rt.useBufferedSender)
+ kvserver.RangeFeedUseScheduler.Override(ctx, &settings.SV, s.useScheduler)
srv, _, db := serverutils.StartServer(t, base.TestServerArgs{
Settings: settings,
})
@@ -178,13 +181,13 @@ func TestWithOnFrontierAdvance(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
- testutils.RunValues(t, "feed_type", feedTypes, func(t *testing.T, rt rangefeedTestType) {
+ testutils.RunValues(t, "feed_type", procTypes, func(t *testing.T, s feedProcessorType) {
ctx := context.Background()
settings := cluster.MakeTestingClusterSettings()
	// We must enable the desired scheduler settings before we start the
	// cluster; otherwise we will trigger processor restarts later, and this
	// test can't handle duplicated events.
- kvserver.RangefeedUseBufferedSender.Override(ctx, &settings.SV, rt.useBufferedSender)
+ kvserver.RangeFeedUseScheduler.Override(ctx, &settings.SV, s.useScheduler)
tc := testcluster.StartTestCluster(t, 1, base.TestClusterArgs{
ReplicationMode: base.ReplicationManual,
ServerArgs: base.TestServerArgs{Settings: settings},
@@ -318,13 +321,13 @@ func TestWithOnCheckpoint(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
- testutils.RunValues(t, "feed_type", feedTypes, func(t *testing.T, rt rangefeedTestType) {
+ testutils.RunValues(t, "feed_type", procTypes, func(t *testing.T, s feedProcessorType) {
ctx := context.Background()
settings := cluster.MakeTestingClusterSettings()
	// We must enable the desired scheduler settings before we start the
	// cluster; otherwise we will trigger processor restarts later, and this
	// test can't handle duplicated events.
- kvserver.RangefeedUseBufferedSender.Override(ctx, &settings.SV, rt.useBufferedSender)
+ kvserver.RangeFeedUseScheduler.Override(ctx, &settings.SV, s.useScheduler)
srv, _, db := serverutils.StartServer(t, base.TestServerArgs{
Settings: settings,
})
@@ -424,13 +427,13 @@ func TestRangefeedValueTimestamps(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
- testutils.RunValues(t, "feed_type", feedTypes, func(t *testing.T, rt rangefeedTestType) {
+ testutils.RunValues(t, "feed_type", procTypes, func(t *testing.T, s feedProcessorType) {
ctx := context.Background()
settings := cluster.MakeTestingClusterSettings()
	// We must enable the desired scheduler settings before we start the
	// cluster; otherwise we will trigger processor restarts later, and this
	// test can't handle duplicated events.
- kvserver.RangefeedUseBufferedSender.Override(ctx, &settings.SV, rt.useBufferedSender)
+ kvserver.RangeFeedUseScheduler.Override(ctx, &settings.SV, s.useScheduler)
srv, _, db := serverutils.StartServer(t, base.TestServerArgs{
Settings: settings,
})
@@ -544,13 +547,13 @@ func TestWithOnSSTable(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
- testutils.RunValues(t, "feed_type", feedTypes, func(t *testing.T, rt rangefeedTestType) {
+ testutils.RunValues(t, "feed_type", procTypes, func(t *testing.T, s feedProcessorType) {
ctx := context.Background()
settings := cluster.MakeTestingClusterSettings()
	// We must enable the desired scheduler settings before we start the
	// cluster; otherwise we will trigger processor restarts later, and this
	// test can't handle duplicated events.
- kvserver.RangefeedUseBufferedSender.Override(ctx, &settings.SV, rt.useBufferedSender)
+ kvserver.RangeFeedUseScheduler.Override(ctx, &settings.SV, s.useScheduler)
srv, _, db := serverutils.StartServer(t, base.TestServerArgs{
Settings: settings,
DefaultTestTenant: base.TestIsForStuffThatShouldWorkWithSecondaryTenantsButDoesntYet(109473),
@@ -647,14 +650,15 @@ func TestWithOnSSTableCatchesUpIfNotSet(t *testing.T) {
storage.DisableMetamorphicSimpleValueEncoding(t)
- testutils.RunValues(t, "feed_type", feedTypes, func(t *testing.T, rt rangefeedTestType) {
+ testutils.RunValues(t, "feed_type", procTypes, func(t *testing.T, s feedProcessorType) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
+
settings := cluster.MakeTestingClusterSettings()
	// We must enable the desired scheduler settings before we start the
	// cluster; otherwise we will trigger processor restarts later, and this
	// test can't handle duplicated events.
- kvserver.RangefeedUseBufferedSender.Override(ctx, &settings.SV, rt.useBufferedSender)
+ kvserver.RangeFeedUseScheduler.Override(ctx, &settings.SV, s.useScheduler)
tc := testcluster.StartTestCluster(t, 1, base.TestClusterArgs{
ServerArgs: base.TestServerArgs{
DefaultTestTenant: base.TestIsForStuffThatShouldWorkWithSecondaryTenantsButDoesntYet(109473),
@@ -760,13 +764,13 @@ func TestWithOnDeleteRange(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
- testutils.RunValues(t, "feed_type", feedTypes, func(t *testing.T, rt rangefeedTestType) {
+ testutils.RunValues(t, "feed_type", procTypes, func(t *testing.T, s feedProcessorType) {
ctx := context.Background()
settings := cluster.MakeTestingClusterSettings()
	// We must enable the desired scheduler settings before we start the
	// cluster; otherwise we will trigger processor restarts later, and this
	// test can't handle duplicated events.
- kvserver.RangefeedUseBufferedSender.Override(ctx, &settings.SV, rt.useBufferedSender)
+ kvserver.RangeFeedUseScheduler.Override(ctx, &settings.SV, s.useScheduler)
tc := testcluster.StartTestCluster(t, 1, base.TestClusterArgs{
ServerArgs: base.TestServerArgs{
Settings: settings,
@@ -947,13 +951,13 @@ func TestUnrecoverableErrors(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
- testutils.RunValues(t, "feed_type", feedTypes, func(t *testing.T, rt rangefeedTestType) {
+ testutils.RunValues(t, "feed_type", procTypes, func(t *testing.T, s feedProcessorType) {
ctx := context.Background()
settings := cluster.MakeTestingClusterSettings()
	// We must enable the desired scheduler settings before we start the
	// cluster; otherwise we will trigger processor restarts later, and this
	// test can't handle duplicated events.
- kvserver.RangefeedUseBufferedSender.Override(ctx, &settings.SV, rt.useBufferedSender)
+ kvserver.RangeFeedUseScheduler.Override(ctx, &settings.SV, s.useScheduler)
srv, sqlDB, kvDB := serverutils.StartServer(t, base.TestServerArgs{
DefaultTestTenant: base.TestIsForStuffThatShouldWorkWithSecondaryTenantsButDoesntYet(109472),
Knobs: base.TestingKnobs{
@@ -1047,14 +1051,15 @@ func TestMVCCHistoryMutationError(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
- testutils.RunValues(t, "feed_type", feedTypes, func(t *testing.T, rt rangefeedTestType) {
+ testutils.RunValues(t, "feed_type", procTypes, func(t *testing.T, s feedProcessorType) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
+
settings := cluster.MakeTestingClusterSettings()
	// We must enable the desired scheduler settings before we start the
	// cluster; otherwise we will trigger processor restarts later, and this
	// test can't handle duplicated events.
- kvserver.RangefeedUseBufferedSender.Override(ctx, &settings.SV, rt.useBufferedSender)
+ kvserver.RangeFeedUseScheduler.Override(ctx, &settings.SV, s.useScheduler)
srv, _, db := serverutils.StartServer(t, base.TestServerArgs{
Settings: settings,
})
@@ -1136,13 +1141,13 @@ func TestRangefeedWithLabelsOption(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
- testutils.RunValues(t, "feed_type", feedTypes, func(t *testing.T, rt rangefeedTestType) {
+ testutils.RunValues(t, "feed_type", procTypes, func(t *testing.T, s feedProcessorType) {
ctx := context.Background()
settings := cluster.MakeTestingClusterSettings()
	// We must enable the desired scheduler settings before we start the
	// cluster; otherwise we will trigger processor restarts later, and this
	// test can't handle duplicated events.
- kvserver.RangefeedUseBufferedSender.Override(ctx, &settings.SV, rt.useBufferedSender)
+ kvserver.RangeFeedUseScheduler.Override(ctx, &settings.SV, s.useScheduler)
srv, _, db := serverutils.StartServer(t, base.TestServerArgs{
Settings: settings,
})
@@ -1255,13 +1260,13 @@ func TestRangeFeedStartTimeExclusive(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
- testutils.RunValues(t, "feed_type", feedTypes, func(t *testing.T, rt rangefeedTestType) {
+ testutils.RunValues(t, "feed_type", procTypes, func(t *testing.T, s feedProcessorType) {
ctx := context.Background()
settings := cluster.MakeTestingClusterSettings()
	// We must enable the desired scheduler settings before we start the
	// cluster; otherwise we will trigger processor restarts later, and this
	// test can't handle duplicated events.
- kvserver.RangefeedUseBufferedSender.Override(ctx, &settings.SV, rt.useBufferedSender)
+ kvserver.RangeFeedUseScheduler.Override(ctx, &settings.SV, s.useScheduler)
srv, _, db := serverutils.StartServer(t, base.TestServerArgs{
Settings: settings,
})
@@ -1451,7 +1456,7 @@ func TestRangeFeedIntentResolutionRace(t *testing.T) {
}
eventC := make(chan *kvpb.RangeFeedEvent)
sink := newChannelSink(ctx, eventC)
- require.NoError(t, s3.RangeFeed(sink.ctx, &req, sink)) // check if we've errored yet
+ require.NoError(t, s3.RangeFeed(&req, sink)) // check if we've errored yet
require.NoError(t, sink.Error())
t.Logf("started rangefeed on %s", repl3)
@@ -1623,6 +1628,10 @@ func newChannelSink(ctx context.Context, ch chan<- *kvpb.RangeFeedEvent) *channe
return &channelSink{ctx: ctx, ch: ch, done: make(chan *kvpb.Error, 1)}
}
+func (c *channelSink) Context() context.Context {
+ return c.ctx
+}
+
func (c *channelSink) SendUnbufferedIsThreadSafe() {}
func (c *channelSink) SendUnbuffered(e *kvpb.RangeFeedEvent) error {
@@ -1661,13 +1670,13 @@ func TestRangeFeedMetadataManualSplit(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
- testutils.RunValues(t, "feed_type", feedTypes, func(t *testing.T, rt rangefeedTestType) {
+ testutils.RunValues(t, "feed_type", procTypes, func(t *testing.T, s feedProcessorType) {
ctx := context.Background()
settings := cluster.MakeTestingClusterSettings()
	// We must enable the desired scheduler settings before we start the
	// cluster; otherwise we will trigger processor restarts later, and this
	// test can't handle duplicated events.
- kvserver.RangefeedUseBufferedSender.Override(ctx, &settings.SV, rt.useBufferedSender)
+ kvserver.RangeFeedUseScheduler.Override(ctx, &settings.SV, s.useScheduler)
kvserver.RangefeedEnabled.Override(ctx, &settings.SV, true)
srv, _, db := serverutils.StartServer(t, base.TestServerArgs{
Settings: settings,
@@ -1785,13 +1794,13 @@ func TestRangeFeedMetadataAutoSplit(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
- testutils.RunValues(t, "feed_type", feedTypes, func(t *testing.T, rt rangefeedTestType) {
+ testutils.RunValues(t, "feed_type", procTypes, func(t *testing.T, s feedProcessorType) {
ctx := context.Background()
settings := cluster.MakeTestingClusterSettings()
	// We must enable the desired scheduler settings before we start the
	// cluster; otherwise we will trigger processor restarts later, and this
	// test can't handle duplicated events.
- kvserver.RangefeedUseBufferedSender.Override(ctx, &settings.SV, rt.useBufferedSender)
+ kvserver.RangeFeedUseScheduler.Override(ctx, &settings.SV, s.useScheduler)
kvserver.RangefeedEnabled.Override(ctx, &settings.SV, true)
// Lower the closed timestamp target duration to speed up the test.
closedts.TargetDuration.Override(ctx, &settings.SV, 100*time.Millisecond)
diff --git a/pkg/kv/kvpb/BUILD.bazel b/pkg/kv/kvpb/BUILD.bazel
index 4e74f04291b4..6be0090e411f 100644
--- a/pkg/kv/kvpb/BUILD.bazel
+++ b/pkg/kv/kvpb/BUILD.bazel
@@ -74,7 +74,6 @@ go_test(
"//pkg/storage/enginepb",
"//pkg/testutils/echotest",
"//pkg/util/buildutil",
- "//pkg/util/encoding",
"//pkg/util/hlc",
"//pkg/util/protoutil",
"//pkg/util/timeutil",
diff --git a/pkg/kv/kvpb/api.go b/pkg/kv/kvpb/api.go
index 61c3d79cbf8a..0238518628e2 100644
--- a/pkg/kv/kvpb/api.go
+++ b/pkg/kv/kvpb/api.go
@@ -2520,6 +2520,8 @@ func (s *ScanStats) String() string {
// RangeFeedEventSink is an interface for sending a single rangefeed event.
type RangeFeedEventSink interface {
+ // Context returns the context for this stream.
+ Context() context.Context
// SendUnbuffered blocks until it sends the RangeFeedEvent, the stream is
// done, or the stream breaks. Send must be safe to call on the same stream in
// different goroutines.
diff --git a/pkg/kv/kvpb/string_test.go b/pkg/kv/kvpb/string_test.go
index 70751bdda3e8..2ceb51083ea0 100644
--- a/pkg/kv/kvpb/string_test.go
+++ b/pkg/kv/kvpb/string_test.go
@@ -18,7 +18,6 @@ import (
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/concurrency/lock"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/testutils/echotest"
- "github.com/cockroachdb/cockroach/pkg/util/encoding"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/uuid"
"github.com/cockroachdb/errors"
@@ -86,7 +85,7 @@ func TestReplicaUnavailableError(t *testing.T) {
func TestAmbiguousResultError(t *testing.T) {
ctx := context.Background()
- wrapped := errors.Errorf("boom with a %s", encoding.Unsafe("secret"))
+ wrapped := errors.Errorf("boom with a %s", redact.Unsafe("secret"))
var err error = kvpb.NewAmbiguousResultError(wrapped)
err = errors.DecodeError(ctx, errors.EncodeError(ctx, err))
require.True(t, errors.Is(err, wrapped), "%+v", err)
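
redact.Unsafe marks a value as sensitive even when its type would otherwise count as safe, so it is fenced in ‹...› markers in redactable output and struck on redaction. A rough standalone sketch of the library's behavior (printed output in the comments is approximate):

    package main

    import (
        "fmt"

        "github.com/cockroachdb/redact"
    )

    func main() {
        // Unsafe values land inside ‹...› markers; Safe values stay bare.
        s := redact.Sprintf("boom with a %s on %s",
            redact.Unsafe("secret"), redact.Safe("n7"))
        fmt.Println(s)          // boom with a ‹secret› on n7 (approximate)
        fmt.Println(s.Redact()) // boom with a ‹×› on n7 (approximate)
    }
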
diff --git a/pkg/kv/kvserver/BUILD.bazel b/pkg/kv/kvserver/BUILD.bazel
index d80c08dbea41..7ba57b4b009f 100644
--- a/pkg/kv/kvserver/BUILD.bazel
+++ b/pkg/kv/kvserver/BUILD.bazel
@@ -275,7 +275,6 @@ go_test(
"client_migration_test.go",
"client_mvcc_gc_test.go",
"client_protectedts_test.go",
- "client_raft_epoch_leases_test.go",
"client_raft_helpers_test.go",
"client_raft_log_queue_test.go",
"client_raft_test.go",
@@ -399,6 +398,7 @@ go_test(
"//pkg/kv",
"//pkg/kv/kvclient",
"//pkg/kv/kvclient/kvcoord",
+ "//pkg/kv/kvclient/rangefeed",
"//pkg/kv/kvclient/rangefeed/rangefeedcache",
"//pkg/kv/kvpb",
"//pkg/kv/kvserver/abortspan",
@@ -451,7 +451,6 @@ go_test(
"//pkg/kv/kvserver/split",
"//pkg/kv/kvserver/stateloader",
"//pkg/kv/kvserver/storeliveness",
- "//pkg/kv/kvserver/storeliveness/storelivenesspb",
"//pkg/kv/kvserver/tenantrate",
"//pkg/kv/kvserver/tscache",
"//pkg/kv/kvserver/txnwait",
diff --git a/pkg/kv/kvserver/client_raft_epoch_leases_test.go b/pkg/kv/kvserver/client_raft_epoch_leases_test.go
deleted file mode 100644
index 53f8996b73dc..000000000000
--- a/pkg/kv/kvserver/client_raft_epoch_leases_test.go
+++ /dev/null
@@ -1,207 +0,0 @@
-// Copyright 2024 The Cockroach Authors.
-//
-// Use of this software is governed by the CockroachDB Software License
-// included in the /LICENSE file.
-
-package kvserver_test
-
-import (
- "context"
- "sync/atomic"
- "testing"
- "time"
-
- "github.com/cockroachdb/cockroach/pkg/base"
- "github.com/cockroachdb/cockroach/pkg/kv"
- "github.com/cockroachdb/cockroach/pkg/kv/kvserver"
- "github.com/cockroachdb/cockroach/pkg/raft"
- "github.com/cockroachdb/cockroach/pkg/raft/raftpb"
- "github.com/cockroachdb/cockroach/pkg/roachpb"
- "github.com/cockroachdb/cockroach/pkg/settings/cluster"
- "github.com/cockroachdb/cockroach/pkg/testutils"
- "github.com/cockroachdb/cockroach/pkg/testutils/skip"
- "github.com/cockroachdb/cockroach/pkg/testutils/testcluster"
- "github.com/cockroachdb/cockroach/pkg/util/leaktest"
- "github.com/cockroachdb/cockroach/pkg/util/log"
- "github.com/stretchr/testify/require"
-)
-
-// Raft related tests that are very tightly coupled to epoch based leases. An
-// equivalent leader leases variant of these tests should exist elsewhere. As
-// such, we should be able to get rid of these tests once epoch based leases are
-// no longer supported.
-
-// TestRaftCheckQuorumEpochLeases tests that Raft CheckQuorum works properly
-// with epoch based leases, i.e. that a leader will step down if it hasn't heard
-// from a quorum of followers in the last election timeout interval.
-//
-// n1 (leader)
-// x x
-// x x
-// (follower) n2 ---- n3 (follower)
-//
-// We test this by partitioning the leader away from two followers, using either
-// a symmetric or asymmetric partition. In the asymmetric case, the leader can
-// send heartbeats to followers, but won't receive responses. Eventually, it
-// should step down and the followers should elect a new leader.
-//
-// We also test this with both quiesced and unquiesced ranges.
-func TestRaftCheckQuorumEpochLeases(t *testing.T) {
- defer leaktest.AfterTest(t)()
- defer log.Scope(t).Close(t)
-
- // This test is timing-sensitive, so skip it under deadlock detector and
- // race.
- skip.UnderDeadlock(t)
- skip.UnderRace(t)
-
- testutils.RunTrueAndFalse(t, "symmetric", func(t *testing.T, symmetric bool) {
- testutils.RunTrueAndFalse(t, "quiesce", func(t *testing.T, quiesce bool) {
- ctx := context.Background()
-
- // Only run the test with epoch based leases.
- st := cluster.MakeTestingClusterSettings()
- kvserver.TransferExpirationLeasesFirstEnabled.Override(ctx, &st.SV, false)
- kvserver.ExpirationLeasesOnly.Override(ctx, &st.SV, false)
-
- tc := testcluster.StartTestCluster(t, 3, base.TestClusterArgs{
- ReplicationMode: base.ReplicationManual,
- ServerArgs: base.TestServerArgs{
- Settings: st,
- RaftConfig: base.RaftConfig{
- RaftEnableCheckQuorum: true,
- RaftTickInterval: 200 * time.Millisecond, // speed up test
- },
- Knobs: base.TestingKnobs{
- Store: &kvserver.StoreTestingKnobs{
- DisableQuiescence: !quiesce,
- },
- },
- },
- })
- defer tc.Stopper().Stop(ctx)
-
- logStatus := func(s *raft.Status) {
- t.Helper()
- require.NotNil(t, s)
- t.Logf("n%d %s at term=%d commit=%d", s.ID, s.RaftState, s.Term, s.Commit)
- }
-
- // Create a range, upreplicate it, and replicate a write.
- sender := tc.GetFirstStoreFromServer(t, 0).TestSender()
- key := tc.ScratchRange(t)
- desc := tc.AddVotersOrFatal(t, key, tc.Targets(1, 2)...)
-
- _, pErr := kv.SendWrapped(ctx, sender, incrementArgs(key, 1))
- require.NoError(t, pErr.GoError())
- tc.WaitForValues(t, key, []int64{1, 1, 1})
-
- repl1, err := tc.GetFirstStoreFromServer(t, 0).GetReplica(desc.RangeID)
- require.NoError(t, err)
- repl2, err := tc.GetFirstStoreFromServer(t, 1).GetReplica(desc.RangeID)
- require.NoError(t, err)
- repl3, err := tc.GetFirstStoreFromServer(t, 2).GetReplica(desc.RangeID)
- require.NoError(t, err)
-
- // Set up dropping of inbound messages on n1 from n2,n3, but don't
- // activate it yet.
- var partitioned atomic.Bool
- dropRaftMessagesFrom(t, tc.Servers[0], desc, []roachpb.ReplicaID{2, 3}, &partitioned)
- if symmetric {
- // Drop outbound messages from n1 to n2,n3 too.
- dropRaftMessagesFrom(t, tc.Servers[1], desc, []roachpb.ReplicaID{1}, &partitioned)
- dropRaftMessagesFrom(t, tc.Servers[2], desc, []roachpb.ReplicaID{1}, &partitioned)
- }
-
- // Make sure the lease is on n1 and that everyone has applied it.
- tc.TransferRangeLeaseOrFatal(t, desc, tc.Target(0))
- _, pErr = kv.SendWrapped(ctx, sender, incrementArgs(key, 1))
- require.NoError(t, pErr.GoError())
- tc.WaitForValues(t, key, []int64{2, 2, 2})
- t.Logf("n1 has lease")
-
- // Wait for the range to quiesce, if enabled.
- if quiesce {
- require.Eventually(t, func() bool {
- return repl1.IsQuiescent() && repl2.IsQuiescent() && repl3.IsQuiescent()
- }, 10*time.Second, 100*time.Millisecond)
- t.Logf("n1, n2, and n3 quiesced")
- } else {
- require.False(t, repl1.IsQuiescent() || repl2.IsQuiescent() || repl3.IsQuiescent())
- t.Logf("n1, n2, and n3 not quiesced")
- }
-
- // Partition n1.
- partitioned.Store(true)
- t.Logf("n1 partitioned")
-
- // Fetch the leader's initial status.
- initialStatus := repl1.RaftStatus()
- require.Equal(t, raftpb.StateLeader, initialStatus.RaftState)
- logStatus(initialStatus)
-
- // Unquiesce the leader if necessary. We have to do so by submitting an
- // empty proposal, otherwise the leader will immediately quiesce again.
- if quiesce {
- ok, err := repl1.MaybeUnquiesceAndPropose()
- require.NoError(t, err)
- require.True(t, ok)
- t.Logf("n1 unquiesced")
- } else {
- require.False(t, repl1.IsQuiescent())
- t.Logf("n1 not quiesced")
- }
-
- // Wait for the leader to become a candidate.
- require.Eventually(t, func() bool {
- status := repl1.RaftStatus()
- logStatus(status)
- return status.RaftState == raftpb.StatePreCandidate
- }, 10*time.Second, 500*time.Millisecond)
- t.Logf("n1 became pre-candidate")
-
- // In the case of a symmetric partition of a quiesced range, we have to
- // wake up n2 to elect a new leader.
- if quiesce && symmetric {
- require.True(t, repl2.MaybeUnquiesce())
- t.Logf("n2 unquiesced")
- }
-
- // n2 or n3 should elect a new leader.
- var leaderStatus *raft.Status
- require.Eventually(t, func() bool {
- for _, status := range []*raft.Status{repl2.RaftStatus(), repl3.RaftStatus()} {
- logStatus(status)
- if status.RaftState == raftpb.StateLeader {
- leaderStatus = status
- return true
- }
- }
- return false
- }, 10*time.Second, 500*time.Millisecond)
- t.Logf("n%d became leader", leaderStatus.ID)
-
- // n1 should remain pre-candidate, since it doesn't hear about the new
- // leader.
- require.Never(t, func() bool {
- status := repl1.RaftStatus()
- logStatus(status)
- return status.RaftState != raftpb.StatePreCandidate
- }, 3*time.Second, 500*time.Millisecond)
- t.Logf("n1 remains pre-candidate")
-
- // The existing leader shouldn't have been affected by n1's prevotes.
- var finalStatus *raft.Status
- for _, status := range []*raft.Status{repl2.RaftStatus(), repl3.RaftStatus()} {
- logStatus(status)
- if status.RaftState == raftpb.StateLeader {
- finalStatus = status
- break
- }
- }
- require.NotNil(t, finalStatus)
- require.Equal(t, leaderStatus.ID, finalStatus.ID)
- require.Equal(t, leaderStatus.Term, finalStatus.Term)
- })
- })
-}
diff --git a/pkg/kv/kvserver/client_raft_helpers_test.go b/pkg/kv/kvserver/client_raft_helpers_test.go
index aa4c003e48d8..b29cfcacdf23 100644
--- a/pkg/kv/kvserver/client_raft_helpers_test.go
+++ b/pkg/kv/kvserver/client_raft_helpers_test.go
@@ -14,11 +14,8 @@ import (
"github.com/cockroachdb/cockroach/pkg/kv/kvpb"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverpb"
- "github.com/cockroachdb/cockroach/pkg/kv/kvserver/storeliveness"
- "github.com/cockroachdb/cockroach/pkg/kv/kvserver/storeliveness/storelivenesspb"
"github.com/cockroachdb/cockroach/pkg/raft"
"github.com/cockroachdb/cockroach/pkg/roachpb"
- "github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
"github.com/cockroachdb/cockroach/pkg/testutils/testcluster"
"github.com/cockroachdb/cockroach/pkg/util/log"
@@ -229,8 +226,8 @@ func (h *testClusterStoreRaftMessageHandler) HandleDelegatedSnapshot(
return store.HandleDelegatedSnapshot(ctx, req)
}
-// testClusterPartitionedRange is a convenient abstraction to create a range on
-// a node in a multiTestContext which can be partitioned and unpartitioned.
+// testClusterPartitionedRange is a convenient abstraction to create a range on a node
+// in a multiTestContext which can be partitioned and unpartitioned.
type testClusterPartitionedRange struct {
rangeID roachpb.RangeID
mu struct {
@@ -412,46 +409,32 @@ func (pr *testClusterPartitionedRange) extend(
}
// dropRaftMessagesFrom sets up a Raft message handler on the given server that
-// drops inbound Raft messages from the given range and replica IDs. In addition
-// to raft messages, StoreLiveness messages from the replica IDs' store are also
-// dropped. Outbound messages are not affected, and must be dropped on the
-// receiver.
+// drops inbound Raft messages from the given range and replica IDs. Outbound
+// messages are not affected, and must be dropped on the receiver.
//
// If cond is given, messages are only dropped when the atomic bool is true.
// Otherwise, messages are always dropped.
//
-// This will replace the previous message handlers, if any.
+// This will replace the previous message handler, if any.
func dropRaftMessagesFrom(
t *testing.T,
srv serverutils.TestServerInterface,
- desc roachpb.RangeDescriptor,
+ rangeID roachpb.RangeID,
fromReplicaIDs []roachpb.ReplicaID,
cond *atomic.Bool,
) {
- store, err := srv.GetStores().(*kvserver.Stores).GetStore(srv.GetFirstStoreID())
- require.NoError(t, err)
-
dropFrom := map[roachpb.ReplicaID]bool{}
- dropFromStore := map[roachpb.StoreID]bool{}
for _, id := range fromReplicaIDs {
dropFrom[id] = true
- rep, ok := desc.GetReplicaDescriptorByID(id)
- if !ok {
- t.Fatalf("replica %d not found in range descriptor %v", id, desc)
- }
- t.Logf("from store %d; adding replica %s to drop list", store.StoreID(), id)
- t.Logf("from store %d; adding store %s to drop list", store.StoreID(), rep.StoreID)
- dropFromStore[rep.StoreID] = true
}
shouldDrop := func(rID roachpb.RangeID, from roachpb.ReplicaID) bool {
- return rID == desc.RangeID && (cond == nil || cond.Load()) && dropFrom[from]
- }
- shouldDropFromStore := func(from roachpb.StoreID) bool {
- return (cond == nil || cond.Load()) && dropFromStore[from]
+ return rID == rangeID && (cond == nil || cond.Load()) && dropFrom[from]
}
+ store, err := srv.GetStores().(*kvserver.Stores).GetStore(srv.GetFirstStoreID())
+ require.NoError(t, err)
srv.RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(store.StoreID(), &unreliableRaftHandler{
- rangeID: desc.RangeID,
+ rangeID: rangeID,
IncomingRaftMessageHandler: store,
unreliableRaftHandlerFuncs: unreliableRaftHandlerFuncs{
dropHB: func(hb *kvserverpb.RaftHeartbeat) bool {
@@ -465,18 +448,6 @@ func dropRaftMessagesFrom(
},
},
})
- srv.StoreLivenessTransport().(*storeliveness.Transport).ListenMessages(store.StoreID(), &storeliveness.UnreliableHandler{
- MessageHandler: store.TestingStoreLivenessMessageHandler(),
- UnreliableHandlerFuncs: storeliveness.UnreliableHandlerFuncs{
- DropStoreLivenessMsg: func(msg *storelivenesspb.Message) bool {
- drop := shouldDropFromStore(msg.From.StoreID)
- if drop {
- t.Logf("dropping msg %s from store %d: to %d", msg.Type, msg.From.StoreID, msg.To.StoreID)
- }
- return drop
- },
- },
- })
}
// getMapsDiff returns the difference between the values of corresponding
@@ -491,11 +462,3 @@ func getMapsDiff(beforeMap map[string]int64, afterMap map[string]int64) map[stri
}
return diffMap
}
-
-// alwaysRunWithLeaderLeases configures settings to ensure the caller is always
-// using leader leases, regardless of any metamorphic constants.
-func alwaysRunWithLeaderLeases(ctx context.Context, st *cluster.Settings) {
- kvserver.ExpirationLeasesOnly.Override(ctx, &st.SV, false)
- kvserver.LeaderLeasesEnabled.Override(ctx, &st.SV, true)
- kvserver.RaftLeaderFortificationFractionEnabled.Override(ctx, &st.SV, 1.0)
-}
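
The reverted dropRaftMessagesFrom keys on a plain RangeID again and keeps the optional atomic.Bool gate, so a test installs the handler during setup and arms the partition later. The core predicate, sketched with plain ints in place of the roachpb ID types:

    package partitionsketch

    import "sync/atomic"

    // dropPredicate returns a filter that drops messages for one range from
    // a set of replicas, optionally gated on cond so the "partition" can be
    // armed only after the cluster is set up (a sketch of the helper in the
    // diff; IDs are plain ints here instead of roachpb types).
    func dropPredicate(rangeID int, fromReplicaIDs []int, cond *atomic.Bool) func(rID, from int) bool {
        dropFrom := map[int]bool{}
        for _, id := range fromReplicaIDs {
            dropFrom[id] = true
        }
        return func(rID, from int) bool {
            // If cond is nil, always drop; otherwise drop only while armed.
            return rID == rangeID && (cond == nil || cond.Load()) && dropFrom[from]
        }
    }
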
diff --git a/pkg/kv/kvserver/client_raft_test.go b/pkg/kv/kvserver/client_raft_test.go
index 76d576db8e0e..bdf8433f16aa 100644
--- a/pkg/kv/kvserver/client_raft_test.go
+++ b/pkg/kv/kvserver/client_raft_test.go
@@ -2817,7 +2817,7 @@ func TestRaftHeartbeats(t *testing.T) {
// Wait for several ticks to elapse.
ticksToWait := 2 * store.GetStoreConfig().RaftElectionTimeoutTicks
ticks := store.Metrics().RaftTicks.Count
- for targetTicks := ticks() + ticksToWait; ticks() < targetTicks; {
+ for targetTicks := ticks() + int64(ticksToWait); ticks() < targetTicks; {
time.Sleep(time.Millisecond)
}
@@ -2872,7 +2872,7 @@ func TestReportUnreachableHeartbeats(t *testing.T) {
ticksToWait := 2 * leaderStore.GetStoreConfig().RaftElectionTimeoutTicks
ticks := leaderStore.Metrics().RaftTicks.Count
- for targetTicks := ticks() + ticksToWait; ticks() < targetTicks; {
+ for targetTicks := ticks() + int64(ticksToWait); ticks() < targetTicks; {
time.Sleep(time.Millisecond)
}
@@ -4390,7 +4390,7 @@ func TestRangeQuiescence(t *testing.T) {
// Wait for a bunch of ticks to occur which will allow the follower time to
// campaign.
ticks := tc.GetFirstStoreFromServer(t, followerIdx).Metrics().RaftTicks.Count
- for targetTicks := ticks() + 2*tc.GetFirstStoreFromServer(t, followerIdx).GetStoreConfig().RaftElectionTimeoutTicks; ticks() < targetTicks; {
+ for targetTicks := ticks() + int64(2*tc.GetFirstStoreFromServer(t, followerIdx).GetStoreConfig().RaftElectionTimeoutTicks); ticks() < targetTicks; {
time.Sleep(time.Millisecond)
}
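
The int64(ticksToWait) conversions above track RaftElectionTimeoutTicks changing type while the RaftTicks metric closure still returns int64. The busy-wait itself is a small reusable shape; a sketch with a hypothetical helper name:

    package ticksketch

    import "time"

    // waitForTicks polls a metric closure until it has advanced by at least
    // n ticks, sleeping briefly between polls (mirrors the loops in the
    // diff; the helper name is hypothetical).
    func waitForTicks(ticks func() int64, n int) {
        for target := ticks() + int64(n); ticks() < target; {
            time.Sleep(time.Millisecond)
        }
    }
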
@@ -6313,13 +6313,13 @@ func TestRaftPreVote(t *testing.T) {
// Configure the partition, but don't activate it yet.
if partial {
// Partition n3 away from n1, in both directions.
- dropRaftMessagesFrom(t, tc.Servers[0], desc, []roachpb.ReplicaID{3}, &partitioned)
- dropRaftMessagesFrom(t, tc.Servers[2], desc, []roachpb.ReplicaID{1}, &partitioned)
+ dropRaftMessagesFrom(t, tc.Servers[0], rangeID, []roachpb.ReplicaID{3}, &partitioned)
+ dropRaftMessagesFrom(t, tc.Servers[2], rangeID, []roachpb.ReplicaID{1}, &partitioned)
} else {
// Partition n3 away from both of n1 and n2, in both directions.
- dropRaftMessagesFrom(t, tc.Servers[0], desc, []roachpb.ReplicaID{3}, &partitioned)
- dropRaftMessagesFrom(t, tc.Servers[1], desc, []roachpb.ReplicaID{3}, &partitioned)
- dropRaftMessagesFrom(t, tc.Servers[2], desc, []roachpb.ReplicaID{1, 2}, &partitioned)
+ dropRaftMessagesFrom(t, tc.Servers[0], rangeID, []roachpb.ReplicaID{3}, &partitioned)
+ dropRaftMessagesFrom(t, tc.Servers[1], rangeID, []roachpb.ReplicaID{3}, &partitioned)
+ dropRaftMessagesFrom(t, tc.Servers[2], rangeID, []roachpb.ReplicaID{1, 2}, &partitioned)
}
// Make sure the lease is on n1 and that everyone has applied it.
@@ -6472,7 +6472,7 @@ func TestRaftPreVote(t *testing.T) {
// send heartbeats to followers, but won't receive responses. Eventually, it
// should step down and the followers should elect a new leader.
//
-// Only runs with leader leases.
+// We also test this with both quiesced and unquiesced ranges.
func TestRaftCheckQuorum(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
@@ -6483,129 +6483,153 @@ func TestRaftCheckQuorum(t *testing.T) {
skip.UnderRace(t)
testutils.RunTrueAndFalse(t, "symmetric", func(t *testing.T, symmetric bool) {
- ctx := context.Background()
-
- // Turn on leader leases.
- st := cluster.MakeTestingClusterSettings()
- alwaysRunWithLeaderLeases(ctx, st)
+ testutils.RunTrueAndFalse(t, "quiesce", func(t *testing.T, quiesce bool) {
+ ctx := context.Background()
- tc := testcluster.StartTestCluster(t, 3, base.TestClusterArgs{
- ReplicationMode: base.ReplicationManual,
- ServerArgs: base.TestServerArgs{
- Settings: st,
- RaftConfig: base.RaftConfig{
- RaftEnableCheckQuorum: true,
- RaftTickInterval: 200 * time.Millisecond, // speed up test
+ // Disable expiration-based leases, since these prevent quiescence.
+ st := cluster.MakeTestingClusterSettings()
+ kvserver.TransferExpirationLeasesFirstEnabled.Override(ctx, &st.SV, false)
+ kvserver.ExpirationLeasesOnly.Override(ctx, &st.SV, false)
+
+ tc := testcluster.StartTestCluster(t, 3, base.TestClusterArgs{
+ ReplicationMode: base.ReplicationManual,
+ ServerArgs: base.TestServerArgs{
+ Settings: st,
+ RaftConfig: base.RaftConfig{
+ RaftEnableCheckQuorum: true,
+ RaftTickInterval: 200 * time.Millisecond, // speed up test
+ },
+ Knobs: base.TestingKnobs{
+ Store: &kvserver.StoreTestingKnobs{
+ DisableQuiescence: !quiesce,
+ },
+ },
},
- },
- })
- defer tc.Stopper().Stop(ctx)
+ })
+ defer tc.Stopper().Stop(ctx)
- logStatus := func(s *raft.Status) {
- t.Helper()
- require.NotNil(t, s)
- t.Logf("n%d %s at term=%d commit=%d", s.ID, s.RaftState, s.Term, s.Commit)
- }
+ logStatus := func(s *raft.Status) {
+ t.Helper()
+ require.NotNil(t, s)
+ t.Logf("n%d %s at term=%d commit=%d", s.ID, s.RaftState, s.Term, s.Commit)
+ }
- // Create a range, upreplicate it, and replicate a write.
- sender := tc.GetFirstStoreFromServer(t, 0).TestSender()
- key := tc.ScratchRange(t)
- desc := tc.AddVotersOrFatal(t, key, tc.Targets(1, 2)...)
+ // Create a range, upreplicate it, and replicate a write.
+ sender := tc.GetFirstStoreFromServer(t, 0).TestSender()
+ key := tc.ScratchRange(t)
+ desc := tc.AddVotersOrFatal(t, key, tc.Targets(1, 2)...)
- _, pErr := kv.SendWrapped(ctx, sender, incrementArgs(key, 1))
- require.NoError(t, pErr.GoError())
- tc.WaitForValues(t, key, []int64{1, 1, 1})
+ _, pErr := kv.SendWrapped(ctx, sender, incrementArgs(key, 1))
+ require.NoError(t, pErr.GoError())
+ tc.WaitForValues(t, key, []int64{1, 1, 1})
- repl1, err := tc.GetFirstStoreFromServer(t, 0).GetReplica(desc.RangeID)
- require.NoError(t, err)
- repl2, err := tc.GetFirstStoreFromServer(t, 1).GetReplica(desc.RangeID)
- require.NoError(t, err)
- repl3, err := tc.GetFirstStoreFromServer(t, 2).GetReplica(desc.RangeID)
- require.NoError(t, err)
+ repl1, err := tc.GetFirstStoreFromServer(t, 0).GetReplica(desc.RangeID)
+ require.NoError(t, err)
+ repl2, err := tc.GetFirstStoreFromServer(t, 1).GetReplica(desc.RangeID)
+ require.NoError(t, err)
+ repl3, err := tc.GetFirstStoreFromServer(t, 2).GetReplica(desc.RangeID)
+ require.NoError(t, err)
- // Set up dropping of inbound messages on n1 from n2,n3, but don't
- // activate it yet.
- var partitioned atomic.Bool
- dropRaftMessagesFrom(t, tc.Servers[0], desc, []roachpb.ReplicaID{2, 3}, &partitioned)
- if symmetric {
- // Drop outbound messages from n1 to n2,n3 too.
- dropRaftMessagesFrom(t, tc.Servers[1], desc, []roachpb.ReplicaID{1}, &partitioned)
- dropRaftMessagesFrom(t, tc.Servers[2], desc, []roachpb.ReplicaID{1}, &partitioned)
- }
+ // Set up dropping of inbound messages on n1 from n2,n3, but don't
+ // activate it yet.
+ var partitioned atomic.Bool
+ dropRaftMessagesFrom(t, tc.Servers[0], desc.RangeID, []roachpb.ReplicaID{2, 3}, &partitioned)
+ if symmetric {
+ // Drop outbound messages from n1 to n2,n3 too.
+ dropRaftMessagesFrom(t, tc.Servers[1], desc.RangeID, []roachpb.ReplicaID{1}, &partitioned)
+ dropRaftMessagesFrom(t, tc.Servers[2], desc.RangeID, []roachpb.ReplicaID{1}, &partitioned)
+ }
- // Make sure the lease is on n1 and that everyone has applied it.
- tc.TransferRangeLeaseOrFatal(t, desc, tc.Target(0))
- tc.WaitForLeaseUpgrade(ctx, t, desc)
- _, pErr = kv.SendWrapped(ctx, sender, incrementArgs(key, 1))
- require.NoError(t, pErr.GoError())
- tc.WaitForValues(t, key, []int64{2, 2, 2})
- t.Logf("n1 has lease")
-
- // Partition n1.
- partitioned.Store(true)
- t.Logf("n1 partitioned")
-
- // Fetch the leader's initial status.
- initialStatus := repl1.RaftStatus()
- require.Equal(t, raftpb.StateLeader, initialStatus.RaftState)
- logStatus(initialStatus)
-
- require.False(t, repl1.IsQuiescent())
- t.Logf("n1 not quiesced")
-
- // Wait for the leader to become a candidate.
- require.Eventually(t, func() bool {
- status := repl1.RaftStatus()
- logStatus(status)
- // TODO(ibrahim): once we start checking StoreLiveness before
- // transitioning to a pre-candidate, we'll need to switch this (and the
- // conditional below) to handle this.
- return status.RaftState == raftpb.StatePreCandidate
- }, 10*time.Second, 500*time.Millisecond)
- t.Logf("n1 became pre-candidate")
-
- // n2 or n3 should elect a new leader.
- var leaderStatus *raft.Status
- require.Eventually(t, func() bool {
+ // Make sure the lease is on n1 and that everyone has applied it.
+ tc.TransferRangeLeaseOrFatal(t, desc, tc.Target(0))
+ _, pErr = kv.SendWrapped(ctx, sender, incrementArgs(key, 1))
+ require.NoError(t, pErr.GoError())
+ tc.WaitForValues(t, key, []int64{2, 2, 2})
+ t.Logf("n1 has lease")
+
+ // Wait for the range to quiesce, if enabled.
+ if quiesce {
+ require.Eventually(t, func() bool {
+ return repl1.IsQuiescent() && repl2.IsQuiescent() && repl3.IsQuiescent()
+ }, 10*time.Second, 100*time.Millisecond)
+ t.Logf("n1, n2, and n3 quiesced")
+ } else {
+ require.False(t, repl1.IsQuiescent() || repl2.IsQuiescent() || repl3.IsQuiescent())
+ t.Logf("n1, n2, and n3 not quiesced")
+ }
+
+ // Partition n1.
+ partitioned.Store(true)
+ t.Logf("n1 partitioned")
+
+ // Fetch the leader's initial status.
+ initialStatus := repl1.RaftStatus()
+ require.Equal(t, raftpb.StateLeader, initialStatus.RaftState)
+ logStatus(initialStatus)
+
+ // Unquiesce the leader if necessary. We have to do so by submitting an
+ // empty proposal, otherwise the leader will immediately quiesce again.
+ if quiesce {
+ ok, err := repl1.MaybeUnquiesceAndPropose()
+ require.NoError(t, err)
+ require.True(t, ok)
+ t.Logf("n1 unquiesced")
+ } else {
+ require.False(t, repl1.IsQuiescent())
+ t.Logf("n1 not quiesced")
+ }
+
+ // Wait for the leader to become a candidate.
+ require.Eventually(t, func() bool {
+ status := repl1.RaftStatus()
+ logStatus(status)
+ return status.RaftState == raftpb.StatePreCandidate
+ }, 10*time.Second, 500*time.Millisecond)
+ t.Logf("n1 became pre-candidate")
+
+ // In the case of a symmetric partition of a quiesced range, we have to
+ // wake up n2 to elect a new leader.
+ if quiesce && symmetric {
+ require.True(t, repl2.MaybeUnquiesce())
+ t.Logf("n2 unquiesced")
+ }
+
+ // n2 or n3 should elect a new leader.
+ var leaderStatus *raft.Status
+ require.Eventually(t, func() bool {
+ for _, status := range []*raft.Status{repl2.RaftStatus(), repl3.RaftStatus()} {
+ logStatus(status)
+ if status.RaftState == raftpb.StateLeader {
+ leaderStatus = status
+ return true
+ }
+ }
+ return false
+ }, 10*time.Second, 500*time.Millisecond)
+ t.Logf("n%d became leader", leaderStatus.ID)
+
+ // n1 should remain pre-candidate, since it doesn't hear about the new
+ // leader.
+ require.Never(t, func() bool {
+ status := repl1.RaftStatus()
+ logStatus(status)
+ return status.RaftState != raftpb.StatePreCandidate
+ }, 3*time.Second, 500*time.Millisecond)
+ t.Logf("n1 remains pre-candidate")
+
+ // The existing leader shouldn't have been affected by n1's prevotes.
+ var finalStatus *raft.Status
for _, status := range []*raft.Status{repl2.RaftStatus(), repl3.RaftStatus()} {
logStatus(status)
if status.RaftState == raftpb.StateLeader {
- leaderStatus = status
- return true
+ finalStatus = status
+ break
}
}
- return false
- }, 10*time.Second, 500*time.Millisecond)
- t.Logf("n%d became leader", leaderStatus.ID)
-
- // n1 should remain pre-candidate, since it doesn't hear about the new
- // leader.
- require.Never(t, func() bool {
- status := repl1.RaftStatus()
- logStatus(status)
- // TODO(ibrahim): uncomment this once we start checking StoreLiveness
- // before transitioning to a pre-candidate.
- //expState := status.RaftState == raftpb.StateFollower && status.Lead == raft.None
- //return !expState // require.Never
- return status.RaftState != raftpb.StatePreCandidate
- }, 3*time.Second, 500*time.Millisecond)
- t.Logf("n1 remains pre-candidate")
-
- // The existing leader shouldn't have been affected by n1's prevotes.
- // TODO(ibrahim): This portion of the test can be removed entirely once we
- // don't even transition to a pre-candidate because StoreLiveness doesn't
- // let us.
- var finalStatus *raft.Status
- for _, status := range []*raft.Status{repl2.RaftStatus(), repl3.RaftStatus()} {
- logStatus(status)
- if status.RaftState == raftpb.StateLeader {
- finalStatus = status
- break
- }
- }
- require.NotNil(t, finalStatus)
- require.Equal(t, leaderStatus.ID, finalStatus.ID)
- require.Equal(t, leaderStatus.Term, finalStatus.Term)
+ require.NotNil(t, finalStatus)
+ require.Equal(t, leaderStatus.ID, finalStatus.ID)
+ require.Equal(t, leaderStatus.Term, finalStatus.Term)
+ })
})
}
@@ -6900,9 +6924,9 @@ func TestRaftPreVoteUnquiesceDeadLeader(t *testing.T) {
// Set up a complete partition for n1, but don't activate it yet.
var partitioned atomic.Bool
- dropRaftMessagesFrom(t, tc.Servers[0], desc, []roachpb.ReplicaID{2, 3}, &partitioned)
- dropRaftMessagesFrom(t, tc.Servers[1], desc, []roachpb.ReplicaID{1}, &partitioned)
- dropRaftMessagesFrom(t, tc.Servers[2], desc, []roachpb.ReplicaID{1}, &partitioned)
+ dropRaftMessagesFrom(t, tc.Servers[0], desc.RangeID, []roachpb.ReplicaID{2, 3}, &partitioned)
+ dropRaftMessagesFrom(t, tc.Servers[1], desc.RangeID, []roachpb.ReplicaID{1}, &partitioned)
+ dropRaftMessagesFrom(t, tc.Servers[2], desc.RangeID, []roachpb.ReplicaID{1}, &partitioned)
// Make sure the lease is on n1 and that everyone has applied it.
tc.TransferRangeLeaseOrFatal(t, desc, tc.Target(0))
diff --git a/pkg/kv/kvserver/client_replica_circuit_breaker_test.go b/pkg/kv/kvserver/client_replica_circuit_breaker_test.go
index b8253cbc0d11..36a4816b3e06 100644
--- a/pkg/kv/kvserver/client_replica_circuit_breaker_test.go
+++ b/pkg/kv/kvserver/client_replica_circuit_breaker_test.go
@@ -457,6 +457,10 @@ func newDummyStream(ctx context.Context, name string) *dummyStream {
}
}
+func (s *dummyStream) Context() context.Context {
+ return s.ctx
+}
+
func (s *dummyStream) SendUnbufferedIsThreadSafe() {}
func (s *dummyStream) SendUnbuffered(ev *kvpb.RangeFeedEvent) error {
@@ -489,7 +493,7 @@ func waitReplicaRangeFeed(
return stream.SendUnbuffered(&event)
}
- err := r.RangeFeed(stream.ctx, req, stream, nil /* pacer */)
+ err := r.RangeFeed(req, stream, nil /* pacer */)
if err != nil {
return sendErrToStream(kvpb.NewError(err))
}
@@ -856,9 +860,9 @@ func TestReplicaCircuitBreaker_Partial_Retry(t *testing.T) {
// requests and node liveness heartbeats still succeed.
partitioned := &atomic.Bool{}
partitioned.Store(true)
- dropRaftMessagesFrom(t, n1, desc, []roachpb.ReplicaID{3}, partitioned)
- dropRaftMessagesFrom(t, n2, desc, []roachpb.ReplicaID{3}, partitioned)
- dropRaftMessagesFrom(t, n3, desc, []roachpb.ReplicaID{1, 2}, partitioned)
+ dropRaftMessagesFrom(t, n1, desc.RangeID, []roachpb.ReplicaID{3}, partitioned)
+ dropRaftMessagesFrom(t, n2, desc.RangeID, []roachpb.ReplicaID{3}, partitioned)
+ dropRaftMessagesFrom(t, n3, desc.RangeID, []roachpb.ReplicaID{1, 2}, partitioned)
t.Logf("partitioned n3 raft traffic from n1 and n2")
repl3.TripBreaker()
@@ -897,8 +901,8 @@ func TestReplicaCircuitBreaker_Partial_Retry(t *testing.T) {
// Also partition n1 and n2 away from each other, and trip their breakers. All
// nodes are now completely partitioned away from each other.
- dropRaftMessagesFrom(t, n1, desc, []roachpb.ReplicaID{2, 3}, partitioned)
- dropRaftMessagesFrom(t, n2, desc, []roachpb.ReplicaID{1, 3}, partitioned)
+ dropRaftMessagesFrom(t, n1, desc.RangeID, []roachpb.ReplicaID{2, 3}, partitioned)
+ dropRaftMessagesFrom(t, n2, desc.RangeID, []roachpb.ReplicaID{1, 3}, partitioned)
repl1.TripBreaker()
repl2.TripBreaker()
diff --git a/pkg/kv/kvserver/kvflowcontrol/rac2/range_controller_test.go b/pkg/kv/kvserver/kvflowcontrol/rac2/range_controller_test.go
index ed1e9ef4be18..bf00768862f1 100644
--- a/pkg/kv/kvserver/kvflowcontrol/rac2/range_controller_test.go
+++ b/pkg/kv/kvserver/kvflowcontrol/rac2/range_controller_test.go
@@ -394,6 +394,11 @@ type testingRCRange struct {
snapshots []testingTrackerSnapshot
raftLog raft.MemoryStorage
+ // mu is ordered after RaftMu.
+ //
+ // This is because we hold RaftMu when calling into the RangeController,
+ // which in turn may call back out to the testingRCRange for state
+ // information, as it mocks the dependencies of the RangeController.
mu struct {
syncutil.Mutex
r testingRange
@@ -506,26 +511,37 @@ func (r *testingRCRange) startWaitForEval(name string, pri admissionpb.WorkPrior
}
func (r *testingRCRange) admit(ctx context.Context, storeID roachpb.StoreID, av AdmittedVector) {
- r.mu.Lock()
- defer r.mu.Unlock()
-
- for _, replDesc := range sortReplicasLocked(r) {
- replica := r.mu.r.replicaSet[replDesc.ReplicaID]
- if replica.desc.StoreID == storeID {
- for _, v := range av.Admitted {
- // Ensure that Match doesn't lag behind the highest index in the
- // AdmittedVector.
- replica.info.Match = max(replica.info.Match, v)
+ var replicaID roachpb.ReplicaID
+ var found bool
+ func() {
+		// Scope r.mu to this closure so that it is released before RaftMu is
+		// acquired below, preserving the lock ordering (RaftMu before
+		// testingRCRange.mu).
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ for _, replDesc := range sortReplicasLocked(r) {
+ replica := r.mu.r.replicaSet[replDesc.ReplicaID]
+ if replica.desc.StoreID == storeID {
+ for _, v := range av.Admitted {
+ // Ensure that Match doesn't lag behind the highest index in the
+ // AdmittedVector.
+ replica.info.Match = max(replica.info.Match, v)
+ }
+ replicaID = replica.desc.ReplicaID
+ r.mu.r.replicaSet[replicaID] = replica
+ found = true
+ break
}
- r.mu.r.replicaSet[replica.desc.ReplicaID] = replica
- func() {
- r.rc.opts.ReplicaMutexAsserter.RaftMu.Lock()
- defer r.rc.opts.ReplicaMutexAsserter.RaftMu.Unlock()
- r.rc.AdmitRaftMuLocked(ctx, replica.desc.ReplicaID, av)
- }()
- return
}
+ }()
+
+ if !found {
+ panic("replica not found")
}
+
+ r.rc.opts.ReplicaMutexAsserter.RaftMu.Lock()
+ defer r.rc.opts.ReplicaMutexAsserter.RaftMu.Unlock()
+ r.rc.AdmitRaftMuLocked(ctx, replicaID, av)
}
type testingRange struct {
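
The admit rewrite is a lock-ordering exercise: the test's own mutex must be released before RaftMu is taken. A self-contained sketch of the same shape, with generic names in place of the rac2 types:

package main

import (
    "fmt"
    "sync"
)

type controller struct {
    raftMu sync.Mutex // ordered before state.mu
    state  struct {
        mu       sync.Mutex
        replicas map[int]int // replicaID -> match index
    }
}

// admit updates per-replica state under state.mu, then calls back into the
// controller under raftMu. The anonymous func scopes the state.mu critical
// section so that state.mu is no longer held when raftMu is acquired,
// preserving the raftMu-before-state.mu lock order.
func (c *controller) admit(id, index int) {
    var found bool
    func() {
        c.state.mu.Lock()
        defer c.state.mu.Unlock()
        if cur, ok := c.state.replicas[id]; ok {
            c.state.replicas[id] = max(cur, index)
            found = true
        }
    }()
    if !found {
        panic("replica not found")
    }
    c.raftMu.Lock()
    defer c.raftMu.Unlock()
    fmt.Printf("admitted r%d up to index %d\n", id, index)
}

func main() {
    c := &controller{}
    c.state.replicas = map[int]int{1: 5}
    c.admit(1, 7)
}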
diff --git a/pkg/kv/kvserver/kvflowcontrol/rac2/store_stream.go b/pkg/kv/kvserver/kvflowcontrol/rac2/store_stream.go
index a2d675e8513e..ef6200b4b3be 100644
--- a/pkg/kv/kvserver/kvflowcontrol/rac2/store_stream.go
+++ b/pkg/kv/kvserver/kvflowcontrol/rac2/store_stream.go
@@ -541,8 +541,8 @@ func (w *sendStreamTokenWatcher) run(_ context.Context) {
select {
case <-w.stopper.ShouldQuiesce():
return
- case <-handle.waitChannel():
- if handle.confirmHaveTokensAndUnblockNextWaiter() {
+ case <-handle.WaitChannel():
+ if handle.ConfirmHaveTokensAndUnblockNextWaiter() {
break waiting
}
}
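
This run loop is the canonical consumer of the handle API: block on the channel, then confirm tokens are actually present before proceeding, since the signal is only a hint. A runnable sketch against a toy handle; note the labeled break, since a bare break would only exit the select:

package main

import "fmt"

// toyHandle mimics the WaitChannel/Confirm pair: the channel is signaled
// optimistically, and the confirm step may still report that tokens are
// gone (another waiter won the race).
type toyHandle struct {
    ch     chan struct{}
    tokens *int
}

func (h toyHandle) WaitChannel() <-chan struct{} { return h.ch }

func (h toyHandle) ConfirmHaveTokensAndUnblockNextWaiter() bool {
    return *h.tokens > 0
}

func main() {
    tokens := 0
    h := toyHandle{ch: make(chan struct{}, 1), tokens: &tokens}

    // Producer: return tokens, then signal the waiter. The channel send
    // establishes the happens-before edge for the tokens write.
    go func() {
        tokens = 1
        h.ch <- struct{}{}
    }()

waiting:
    for {
        select {
        case <-h.WaitChannel():
            if h.ConfirmHaveTokensAndUnblockNextWaiter() {
                break waiting // a bare break would only exit the select
            }
        }
    }
    fmt.Println("tokens available, proceeding to deduct")
}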
diff --git a/pkg/kv/kvserver/kvflowcontrol/rac2/token_counter.go b/pkg/kv/kvserver/kvflowcontrol/rac2/token_counter.go
index 555bebcf40cd..eb0cf45758b6 100644
--- a/pkg/kv/kvserver/kvflowcontrol/rac2/token_counter.go
+++ b/pkg/kv/kvserver/kvflowcontrol/rac2/token_counter.go
@@ -22,6 +22,40 @@ import (
"github.com/cockroachdb/redact"
)
+// TokenWaitingHandle is the interface for waiting for positive tokens from a
+// token counter.
+//
+// TODO(sumeer): remove this interface since there is only one implementation.
+type TokenWaitingHandle interface {
+ // WaitChannel is the channel that will be signaled if tokens are possibly
+ // available. If signaled, the caller must call
+ // ConfirmHaveTokensAndUnblockNextWaiter. There is no guarantee of tokens
+ // being available after this channel is signaled, just that tokens were
+ // available recently. A typical usage pattern is:
+ //
+ //   waiting:
+ //   for {
+ //     select {
+ //     case <-handle.WaitChannel():
+ //       if handle.ConfirmHaveTokensAndUnblockNextWaiter() {
+ //         break waiting
+ //       }
+ //     }
+ //   }
+ // tokenCounter.Deduct(...)
+ //
+ // There is a possibility for races, where multiple goroutines may be
+ // signaled and deduct tokens, sending the counter into debt. These cases are
+ // acceptable, as in aggregate the counter provides pacing over time.
+ WaitChannel() <-chan struct{}
+ // ConfirmHaveTokensAndUnblockNextWaiter is called to confirm tokens are
+ // available. True is returned if tokens are available, false otherwise. If
+ // no tokens are available, the caller can resume waiting using WaitChannel.
+ ConfirmHaveTokensAndUnblockNextWaiter() bool
+ // StreamString returns a string representation of the stream. Used for
+ // tracing.
+ StreamString() string
+}
+
// tokenCounterPerWorkClass is a helper struct for implementing tokenCounter.
// tokens are protected by the mutex in tokenCounter. Operations on the
// signalCh may not be protected by that mutex -- see the comment below.
@@ -283,16 +317,15 @@ func (t *tokenCounter) limit(wc admissionpb.WorkClass) kvflowcontrol.Tokens {
return t.mu.counters[wc].limit
}
-// TokensAvailable returns true if tokens are available, in which case handle
-// is empty and should be ignored. If false, it returns a handle that may be
-// used for waiting for tokens to become available.
+// TokensAvailable returns true if tokens are available. If false, it returns
+// a handle that may be used for waiting for tokens to become available.
func (t *tokenCounter) TokensAvailable(
wc admissionpb.WorkClass,
-) (available bool, handle tokenWaitHandle) {
+) (available bool, handle TokenWaitingHandle) {
if t.tokens(wc) > 0 {
- return true, tokenWaitHandle{}
+ return true, nil
}
- return false, tokenWaitHandle{wc: wc, b: t}
+ return false, waitHandle{wc: wc, b: t}
}
// TryDeduct attempts to deduct flow tokens for the given work class. If there
@@ -339,23 +372,25 @@ func (t *tokenCounter) Return(
t.adjust(ctx, wc, tokens, flag)
}
-// tokenWaitHandle is a handle for waiting for tokens to become available from
-// a token counter.
-type tokenWaitHandle struct {
+// waitHandle is a handle for waiting for tokens to become available from a
+// token counter.
+type waitHandle struct {
wc admissionpb.WorkClass
b *tokenCounter
}
-// waitChannel is the channel that will be signaled if tokens are possibly
+var _ TokenWaitingHandle = waitHandle{}
+
+// WaitChannel is the channel that will be signaled if tokens are possibly
// available. If signaled, the caller must call
-// confirmHaveTokensAndUnblockNextWaiter. There is no guarantee of tokens being
+// ConfirmHaveTokensAndUnblockNextWaiter. There is no guarantee of tokens being
// available after this channel is signaled, just that tokens were available
// recently. A typical usage pattern is:
//
// for {
// select {
-// case <-handle.waitChannel():
-// if handle.confirmHaveTokensAndUnblockNextWaiter() {
+// case <-handle.WaitChannel():
+// if handle.ConfirmHaveTokensAndUnblockNextWaiter() {
// break
// }
// }
@@ -365,14 +400,14 @@ type tokenWaitHandle struct {
// There is a possibility for races, where multiple goroutines may be signaled
// and deduct tokens, sending the counter into debt. These cases are
// acceptable, as in aggregate the counter provides pacing over time.
-func (wh tokenWaitHandle) waitChannel() <-chan struct{} {
+func (wh waitHandle) WaitChannel() <-chan struct{} {
return wh.b.mu.counters[wh.wc].signalCh
}
-// confirmHaveTokensAndUnblockNextWaiter is called to confirm tokens are
+// ConfirmHaveTokensAndUnblockNextWaiter is called to confirm tokens are
// available. True is returned if tokens are available, false otherwise. If no
-// tokens are available, the caller can resume waiting using waitChannel.
-func (wh tokenWaitHandle) confirmHaveTokensAndUnblockNextWaiter() (haveTokens bool) {
+// tokens are available, the caller can resume waiting using WaitChannel.
+func (wh waitHandle) ConfirmHaveTokensAndUnblockNextWaiter() (haveTokens bool) {
haveTokens = wh.b.tokens(wh.wc) > 0
if haveTokens {
// Signal the next waiter if we have tokens available before returning.
@@ -381,15 +416,14 @@ func (wh tokenWaitHandle) confirmHaveTokensAndUnblockNextWaiter() (haveTokens bo
return haveTokens
}
-// streamString returns a string representation of the stream. Used for
-// tracing.
-func (wh tokenWaitHandle) streamString() string {
+// StreamString implements TokenWaitingHandle.
+func (wh waitHandle) StreamString() string {
return wh.b.stream.String()
}
type tokenWaitingHandleInfo struct {
- // Can be empty, in which case no methods should be called on it.
- handle tokenWaitHandle
+ // Can be nil, in which case the wait on this can never succeed.
+ handle TokenWaitingHandle
// requiredWait will be set for the leaseholder and leader for regular work.
// For elastic work this will be set for the aforementioned, and all replicas
// which are in StateReplicate.
@@ -471,8 +505,8 @@ func WaitForEval(
requiredWaitCount++
}
var chanValue reflect.Value
- if h.handle != (tokenWaitHandle{}) {
- chanValue = reflect.ValueOf(h.handle.waitChannel())
+ if h.handle != nil {
+ chanValue = reflect.ValueOf(h.handle.WaitChannel())
}
// Else, zero Value, so will never be selected.
scratch = append(scratch,
@@ -509,7 +543,7 @@ func WaitForEval(
return ReplicaRefreshWaitSignaled, scratch
default:
handleInfo := handles[chosen-3]
- if available := handleInfo.handle.confirmHaveTokensAndUnblockNextWaiter(); !available {
+ if available := handleInfo.handle.ConfirmHaveTokensAndUnblockNextWaiter(); !available {
// The handle was signaled but does not currently have tokens
// available. Continue waiting on this handle.
continue
@@ -517,7 +551,7 @@ func WaitForEval(
if traceIndividualWaits {
log.Eventf(ctx, "wait-for-eval: waited until %s tokens available",
- handleInfo.handle.streamString())
+ handleInfo.handle.StreamString())
}
if handleInfo.partOfQuorum {
signaledQuorumCount++
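
With the sentinel changing from a zero-valued struct to a nil interface, callers now test h != nil instead of comparing against tokenWaitHandle{}. One Go subtlety this relies on: the function must return a literal nil, because an interface holding a typed nil value is itself non-nil. A sketch with a pared-down interface:

package main

import "fmt"

type handle interface{ WaitChannel() <-chan struct{} }

type waitHandle struct{ ch chan struct{} }

func (w waitHandle) WaitChannel() <-chan struct{} { return w.ch }

// tokensAvailable mirrors the patched TokensAvailable: when tokens are
// available it returns (true, nil) so callers can test `h != nil`.
func tokensAvailable(tokens int) (bool, handle) {
    if tokens > 0 {
        return true, nil // literal nil: h == nil holds for callers
    }
    return false, waitHandle{ch: make(chan struct{}, 1)}
}

func main() {
    ok, h := tokensAvailable(5)
    fmt.Println(ok, h == nil) // true true

    ok, h = tokensAvailable(0)
    fmt.Println(ok, h == nil) // false false

    // Pitfall: storing a typed nil pointer in the interface makes it
    // non-nil even though the underlying value is nil. Value receivers
    // like waitHandle above sidestep this entirely.
    var p *waitHandle
    var h2 handle = p
    fmt.Println(h2 == nil) // false: interface holds (*waitHandle)(nil)
}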
diff --git a/pkg/kv/kvserver/kvflowcontrol/rac2/token_counter_test.go b/pkg/kv/kvserver/kvflowcontrol/rac2/token_counter_test.go
index 078cda8e4344..6296b76d717a 100644
--- a/pkg/kv/kvserver/kvflowcontrol/rac2/token_counter_test.go
+++ b/pkg/kv/kvserver/kvflowcontrol/rac2/token_counter_test.go
@@ -297,7 +297,7 @@ func TestTokenCounter(t *testing.T) {
assertStateReset := func(t *testing.T) {
available, handle := counter.TokensAvailable(admissionpb.ElasticWorkClass)
require.True(t, available)
- require.Equal(t, tokenWaitHandle{}, handle)
+ require.Nil(t, handle)
require.Equal(t, limits.regular, counter.tokens(admissionpb.RegularWorkClass))
require.Equal(t, limits.elastic, counter.tokens(admissionpb.ElasticWorkClass))
}
@@ -307,11 +307,11 @@ func TestTokenCounter(t *testing.T) {
// classes.
available, handle := counter.TokensAvailable(admissionpb.RegularWorkClass)
require.True(t, available)
- require.Equal(t, tokenWaitHandle{}, handle)
+ require.Nil(t, handle)
available, handle = counter.TokensAvailable(admissionpb.ElasticWorkClass)
require.True(t, available)
- require.Equal(t, tokenWaitHandle{}, handle)
+ require.Nil(t, handle)
assertStateReset(t)
})
@@ -326,7 +326,7 @@ func TestTokenCounter(t *testing.T) {
// Now there should be no tokens available for regular work class.
available, handle := counter.TokensAvailable(admissionpb.RegularWorkClass)
require.False(t, available)
- require.NotEqual(t, tokenWaitHandle{}, handle)
+ require.NotNil(t, handle)
counter.Return(ctx, admissionpb.RegularWorkClass, limits.regular, AdjNormal)
assertStateReset(t)
})
@@ -353,18 +353,18 @@ func TestTokenCounter(t *testing.T) {
// returned.
available, handle := counter.TokensAvailable(admissionpb.RegularWorkClass)
require.False(t, available)
- require.NotEqual(t, tokenWaitHandle{}, handle)
+ require.NotNil(t, handle)
counter.Return(ctx, admissionpb.RegularWorkClass, limits.regular, AdjNormal)
// Wait on the handle to be unblocked and expect that there are tokens
// available when the wait channel is signaled.
- <-handle.waitChannel()
- haveTokens := handle.confirmHaveTokensAndUnblockNextWaiter()
+ <-handle.WaitChannel()
+ haveTokens := handle.ConfirmHaveTokensAndUnblockNextWaiter()
require.True(t, haveTokens)
// Wait on the handle to be unblocked again, this time try deducting such
// that there are no tokens available after.
counter.Deduct(ctx, admissionpb.RegularWorkClass, limits.regular, AdjNormal)
- <-handle.waitChannel()
- haveTokens = handle.confirmHaveTokensAndUnblockNextWaiter()
+ <-handle.WaitChannel()
+ haveTokens = handle.ConfirmHaveTokensAndUnblockNextWaiter()
require.False(t, haveTokens)
// Return the tokens deducted from the first wait above.
counter.Return(ctx, admissionpb.RegularWorkClass, limits.regular, AdjNormal)
@@ -394,14 +394,14 @@ func TestTokenCounter(t *testing.T) {
// available.
available, handle := counter.TokensAvailable(admissionpb.RegularWorkClass)
if !available {
- <-handle.waitChannel()
+ <-handle.WaitChannel()
// This may or may not have raced with another goroutine, there's
// no guarantee we have tokens here. If we don't have tokens here,
// the next call to TryDeduct will fail (unless someone returns
// tokens between here and that call), which is harmless. This test
// is using TokensAvailable and the returned handle to avoid
// busy-waiting.
- handle.confirmHaveTokensAndUnblockNextWaiter()
+ handle.ConfirmHaveTokensAndUnblockNextWaiter()
}
}
@@ -416,8 +416,8 @@ func TestTokenCounter(t *testing.T) {
})
}
-func (t *tokenCounter) testingHandle() tokenWaitHandle {
- return tokenWaitHandle{wc: admissionpb.RegularWorkClass, b: t}
+func (t *tokenCounter) testingHandle() waitHandle {
+ return waitHandle{wc: admissionpb.RegularWorkClass, b: t}
}
type namedTokenCounter struct {
diff --git a/pkg/kv/kvserver/kvserverpb/lease_status.proto b/pkg/kv/kvserver/kvserverpb/lease_status.proto
index 2248908613c1..4a492d7a31c5 100644
--- a/pkg/kv/kvserver/kvserverpb/lease_status.proto
+++ b/pkg/kv/kvserver/kvserverpb/lease_status.proto
@@ -14,7 +14,27 @@ import "util/hlc/timestamp.proto";
import "gogoproto/gogo.proto";
enum LeaseState {
- // ERROR indicates that the lease can't be used or acquired.
+  // ERROR indicates that the lease can't be used or acquired. The state is
+  // not a definitive indication of the lease's validity; rather, the
+  // validity is indeterminate: the lease may be valid or it may not be.
+ //
+ // The ERROR state is returned in the following cases:
+ // 1. An epoch lease has a reference to a node liveness record which the
+ // lease status evaluator is not aware of. This can happen when gossip
+ // is down and node liveness information is not available for the lease
+ // holder. In such cases, it would be unsafe to use the lease because
+ // the evaluator cannot determine the lease's expiration, so it may
+ // have expired. However, it would also be unsafe to replace the lease,
+ // because it may still be valid.
+ // 2. A leader lease is evaluated on a replica that is not the raft leader
+ // and is not aware of a successor raft leader at a future term (either
+ // itself or some other replica). In such cases, the lease may be valid
+ // or it may have expired. The raft leader (+leaseholder) itself would
+ // be able to tell, but the only way for a follower replica to tell is
+ // for it to try to become the raft leader, while respecting raft
+ // fortification rules. In the meantime, it is best for the follower
+ // replica to redirect any requests to the raft leader (+leaseholder).
ERROR = 0;
// VALID indicates that the lease is not expired at the current clock
// time and can be used to serve a given request.
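
Given the expanded comment, ERROR is best read as "indeterminate", not "invalid". A sketch of how a caller might act on that reading, with hypothetical names; the real dispatch lives in the replica's lease handling and is not part of this patch:

package main

import (
    "errors"
    "fmt"
)

type leaseState int

const (
    leaseError leaseState = iota // validity indeterminate: maybe valid, maybe not
    leaseValid
    leaseExpired
)

// handleRequest is a hypothetical dispatcher illustrating the comment's
// guidance: an ERROR state means the evaluator can neither use nor replace
// the lease, so the safe move is to redirect to the presumed leaseholder
// rather than attempt a local acquisition.
func handleRequest(state leaseState) error {
    switch state {
    case leaseValid:
        return nil // serve locally
    case leaseExpired:
        return errors.New("acquire or replace the lease, then retry")
    case leaseError:
        // Neither safe to use (may have expired) nor safe to replace
        // (may still be valid): redirect to the raft leader/leaseholder.
        return errors.New("redirect to raft leader")
    default:
        return fmt.Errorf("unknown lease state %d", state)
    }
}

func main() {
    fmt.Println(handleRequest(leaseError))
}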
diff --git a/pkg/kv/kvserver/lease_queue_test.go b/pkg/kv/kvserver/lease_queue_test.go
index 8804e1b1c3ea..d551d81686b3 100644
--- a/pkg/kv/kvserver/lease_queue_test.go
+++ b/pkg/kv/kvserver/lease_queue_test.go
@@ -27,13 +27,14 @@ import (
"github.com/cockroachdb/cockroach/pkg/testutils"
"github.com/cockroachdb/cockroach/pkg/testutils/listenerutil"
"github.com/cockroachdb/cockroach/pkg/testutils/skip"
+ "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
"github.com/cockroachdb/cockroach/pkg/testutils/testcluster"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/syncutil"
"github.com/cockroachdb/errors"
- proto "github.com/gogo/protobuf/proto"
+ "github.com/gogo/protobuf/proto"
"github.com/stretchr/testify/require"
)
@@ -83,21 +84,34 @@ func TestLeaseQueueLeasePreferencePurgatoryError(t *testing.T) {
ServerArgsPerNode: serverArgs,
})
defer tc.Stopper().Stop(ctx)
+ tdb := sqlutils.MakeSQLRunner(tc.Conns[0])
- db := tc.Conns[0]
setLeasePreferences := func(node int) {
- _, err := db.Exec(fmt.Sprintf(`ALTER TABLE t CONFIGURE ZONE USING
+ tdb.Exec(t, fmt.Sprintf(`ALTER TABLE t CONFIGURE ZONE USING
num_replicas=3, num_voters=3, voter_constraints='[]', lease_preferences='[[+rack=%d]]'`,
node))
- require.NoError(t, err)
+ }
+
+ checkSplits := func() error {
+ var count int
+ var startSplit bool
+ tdb.QueryRow(t,
+ "SELECT count(*), bool_or(start_key ~ 'TableMin') FROM [SHOW RANGES FROM TABLE t WITH DETAILS];",
+ ).Scan(&count, &startSplit)
+ if count != numRanges {
+ return errors.Errorf("expected %d ranges in table, found %d", numRanges, count)
+ }
+ if !startSplit {
+ return errors.New("expected table to be split at /TableMin")
+ }
+ return nil
}
leaseCount := func(node int) int {
var count int
- err := db.QueryRow(fmt.Sprintf(
+ tdb.QueryRow(t, fmt.Sprintf(
"SELECT count(*) FROM [SHOW RANGES FROM TABLE t WITH DETAILS] WHERE lease_holder = %d", node),
).Scan(&count)
- require.NoError(t, err)
return count
}
@@ -109,25 +123,33 @@ func TestLeaseQueueLeasePreferencePurgatoryError(t *testing.T) {
return nil
}
+ // Shorten the closed timestamp target duration and span config reconciliation
+ // interval so that span configs propagate more rapidly.
+ tdb.Exec(t, `SET CLUSTER SETTING kv.closed_timestamp.target_duration = '100ms'`)
+ tdb.Exec(t, `SET CLUSTER SETTING kv.rangefeed.closed_timestamp_refresh_interval = '100ms'`)
+ tdb.Exec(t, `SET CLUSTER SETTING spanconfig.reconciliation_job.checkpoint_interval = '100ms'`)
+
// Create a test table with numRanges-1 splits, to end up with numRanges
// ranges. We will use the test table ranges to assert on the purgatory lease
// preference behavior.
- _, err := db.Exec("CREATE TABLE t (i int);")
- require.NoError(t, err)
- _, err = db.Exec(
- fmt.Sprintf("INSERT INTO t(i) select generate_series(1,%d)", numRanges-1))
- require.NoError(t, err)
- _, err = db.Exec("ALTER TABLE t SPLIT AT SELECT i FROM t;")
- require.NoError(t, err)
+ tdb.Exec(t, "CREATE TABLE t (i int);")
+ tdb.Exec(t, fmt.Sprintf("INSERT INTO t(i) select generate_series(1,%d)", numRanges-1))
+ tdb.Exec(t, "ALTER TABLE t SPLIT AT SELECT i FROM t;")
require.NoError(t, tc.WaitForFullReplication())
- // Set a preference on the initial node, then wait until all the leases for
- // the test table are on that node.
+ // Set a preference on the initial node, then wait until:
+ // (1) the first span in the test table (i.e. [/Table/, /Table//1/1))
+ // has been split off into its own range.
+ // (2) all the leases for the test table are on the initial node.
setLeasePreferences(initialPreferredNode)
testutils.SucceedsSoon(t, func() error {
for serverIdx := 0; serverIdx < numNodes; serverIdx++ {
- require.NoError(t, tc.GetFirstStoreFromServer(t, serverIdx).
- ForceLeaseQueueProcess())
+ store := tc.GetFirstStoreFromServer(t, serverIdx)
+ require.NoError(t, store.ForceSplitScanAndProcess())
+ require.NoError(t, store.ForceLeaseQueueProcess())
+ }
+ if err := checkSplits(); err != nil {
+ return err
}
return checkLeaseCount(initialPreferredNode, numRanges)
})
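
The reworked test folds two conditions into one retry closure: the precondition (the table is fully split) and the property under test (lease placement). A minimal sketch of that polling shape, with a stand-in for testutils.SucceedsSoon:

package main

import (
    "errors"
    "fmt"
    "sync/atomic"
    "time"
)

// succeedsSoon is a stand-in for testutils.SucceedsSoon: retry fn until it
// returns nil or the deadline passes.
func succeedsSoon(timeout time.Duration, fn func() error) error {
    deadline := time.Now().Add(timeout)
    for {
        err := fn()
        if err == nil {
            return nil
        }
        if time.Now().After(deadline) {
            return fmt.Errorf("condition never met: %w", err)
        }
        time.Sleep(10 * time.Millisecond)
    }
}

func main() {
    var splits, leases atomic.Int64
    go func() { // the "cluster" converging in the background
        for i := 0; i < 30; i++ {
            time.Sleep(2 * time.Millisecond)
            splits.Add(1)
            leases.Add(1)
        }
    }()

    err := succeedsSoon(5*time.Second, func() error {
        // Check the precondition (splits) before the condition under test
        // (lease placement), mirroring checkSplits before checkLeaseCount.
        if splits.Load() < 30 {
            return errors.New("table not fully split yet")
        }
        if leases.Load() < 30 {
            return errors.New("leases not yet on preferred node")
        }
        return nil
    })
    fmt.Println("converged:", err == nil)
}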
diff --git a/pkg/kv/kvserver/leases/status.go b/pkg/kv/kvserver/leases/status.go
index 64cf08c6b9cb..69b340e3cb56 100644
--- a/pkg/kv/kvserver/leases/status.go
+++ b/pkg/kv/kvserver/leases/status.go
@@ -178,9 +178,6 @@ func Status(ctx context.Context, nl NodeLiveness, i StatusInput) kvserverpb.Leas
// to replace it.
knownSuccessor := i.RaftStatus.Term > lease.Term && i.RaftStatus.Lead != raft.None
if !knownSuccessor {
- // TODO(nvanbenschoten): we could introduce a new INDETERMINATE state
- // for this case, instead of using ERROR. This would look a bit less
- // unexpected.
status.State = kvserverpb.LeaseState_ERROR
status.ErrInfo = "leader lease is not held locally, cannot determine validity"
return status
diff --git a/pkg/kv/kvserver/rangefeed/BUILD.bazel b/pkg/kv/kvserver/rangefeed/BUILD.bazel
index fd556d9e9cfb..cdb82453c0b5 100644
--- a/pkg/kv/kvserver/rangefeed/BUILD.bazel
+++ b/pkg/kv/kvserver/rangefeed/BUILD.bazel
@@ -8,7 +8,6 @@ go_library(
"buffered_sender.go",
"buffered_stream.go",
"catchup_scan.go",
- "event_queue.go",
"event_size.go",
"filter.go",
"metrics.go",
@@ -20,6 +19,7 @@ go_library(
"stream.go",
"task.go",
"test_helpers.go",
+ "testutil.go",
"unbuffered_sender.go",
],
importpath = "github.com/cockroachdb/cockroach/pkg/kv/kvserver/rangefeed",
@@ -65,11 +65,9 @@ go_test(
"budget_test.go",
"catchup_scan_bench_test.go",
"catchup_scan_test.go",
- "event_queue_test.go",
"event_size_test.go",
"processor_helpers_test.go",
"processor_test.go",
- "registry_helper_test.go",
"registry_test.go",
"resolved_timestamp_test.go",
"scheduler_test.go",
@@ -95,7 +93,6 @@ go_test(
"//pkg/testutils",
"//pkg/testutils/skip",
"//pkg/testutils/storageutils",
- "//pkg/util/ctxgroup",
"//pkg/util/encoding",
"//pkg/util/hlc",
"//pkg/util/leaktest",
diff --git a/pkg/kv/kvserver/rangefeed/bench_test.go b/pkg/kv/kvserver/rangefeed/bench_test.go
index 5112e0d874fa..9c2eced64126 100644
--- a/pkg/kv/kvserver/rangefeed/bench_test.go
+++ b/pkg/kv/kvserver/rangefeed/bench_test.go
@@ -24,10 +24,10 @@ import (
)
type benchmarkRangefeedOpts struct {
- rangefeedTestType rangefeedTestType
- opType opType
- numRegistrations int
- budget int64
+ procType procType
+ opType opType
+ numRegistrations int
+ budget int64
}
type opType string
@@ -47,10 +47,10 @@ func BenchmarkRangefeed(b *testing.B) {
name := fmt.Sprintf("procType=%s/opType=%s/numRegs=%d", procType, opType, numRegistrations)
b.Run(name, func(b *testing.B) {
runBenchmarkRangefeed(b, benchmarkRangefeedOpts{
- rangefeedTestType: procType,
- opType: opType,
- numRegistrations: numRegistrations,
- budget: math.MaxInt64,
+ procType: procType,
+ opType: opType,
+ numRegistrations: numRegistrations,
+ budget: math.MaxInt64,
})
})
}
@@ -90,7 +90,7 @@ func runBenchmarkRangefeed(b *testing.B, opts benchmarkRangefeedOpts) {
span := roachpb.RSpan{Key: roachpb.RKey("a"), EndKey: roachpb.RKey("z")}
p, h, stopper := newTestProcessor(b, withSpan(span), withBudget(budget), withChanCap(b.N),
- withEventTimeout(time.Hour), withRangefeedTestType(opts.rangefeedTestType))
+ withEventTimeout(time.Hour), withProcType(opts.procType))
defer stopper.Stop(ctx)
// Add registrations.
@@ -103,7 +103,7 @@ func runBenchmarkRangefeed(b *testing.B, opts benchmarkRangefeedOpts) {
// extra data.
const withFiltering = false
streams[i] = &noopStream{ctx: ctx, done: make(chan *kvpb.Error, 1)}
- ok, _ := p.Register(ctx, span, hlc.MinTimestamp, nil,
+ ok, _ := p.Register(span, hlc.MinTimestamp, nil,
withDiff, withFiltering, false, /* withOmitRemote */
streams[i], nil)
require.True(b, ok)
@@ -192,6 +192,10 @@ type noopStream struct {
done chan *kvpb.Error
}
+func (s *noopStream) Context() context.Context {
+ return s.ctx
+}
+
func (s *noopStream) SendUnbuffered(*kvpb.RangeFeedEvent) error {
s.events++
return nil
diff --git a/pkg/kv/kvserver/rangefeed/buffered_registration.go b/pkg/kv/kvserver/rangefeed/buffered_registration.go
index c408faa82974..069b2b31adf4 100644
--- a/pkg/kv/kvserver/rangefeed/buffered_registration.go
+++ b/pkg/kv/kvserver/rangefeed/buffered_registration.go
@@ -70,7 +70,6 @@ type bufferedRegistration struct {
var _ registration = &bufferedRegistration{}
func newBufferedRegistration(
- streamCtx context.Context,
span roachpb.Span,
startTS hlc.Timestamp,
catchUpIter *CatchUpIterator,
@@ -85,7 +84,6 @@ func newBufferedRegistration(
) *bufferedRegistration {
br := &bufferedRegistration{
baseRegistration: baseRegistration{
- streamCtx: streamCtx,
span: span,
catchUpTimestamp: startTS,
withDiff: withDiff,
@@ -214,8 +212,8 @@ func (br *bufferedRegistration) outputLoop(ctx context.Context) error {
}
case <-ctx.Done():
return ctx.Err()
- case <-br.streamCtx.Done():
- return br.streamCtx.Err()
+ case <-br.stream.Context().Done():
+ return br.stream.Context().Err()
}
}
}
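
With streamCtx gone from the registration, the output loop derives cancellation from stream.Context() instead, watching both the processor's context and the stream's. A runnable sketch of that double-select:

package main

import (
    "context"
    "fmt"
    "time"
)

type buf struct{ ctx context.Context }

func (b *buf) Context() context.Context { return b.ctx }

// outputLoop drains events until either the processor's ctx or the stream's
// own context is canceled, the same two Done channels the patched
// bufferedRegistration selects on.
func outputLoop(ctx context.Context, stream *buf, events <-chan string) error {
    for {
        select {
        case ev := <-events:
            fmt.Println("sent", ev)
        case <-ctx.Done():
            return ctx.Err()
        case <-stream.Context().Done():
            return stream.Context().Err()
        }
    }
}

func main() {
    streamCtx, cancelStream := context.WithCancel(context.Background())
    events := make(chan string, 1)
    events <- "checkpoint"

    go func() {
        time.Sleep(50 * time.Millisecond)
        cancelStream() // consumer disconnects
    }()
    err := outputLoop(context.Background(), &buf{ctx: streamCtx}, events)
    fmt.Println("loop exited:", err)
}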
diff --git a/pkg/kv/kvserver/rangefeed/buffered_stream.go b/pkg/kv/kvserver/rangefeed/buffered_stream.go
index 4bf2ec2c9002..1ac5a60f415c 100644
--- a/pkg/kv/kvserver/rangefeed/buffered_stream.go
+++ b/pkg/kv/kvserver/rangefeed/buffered_stream.go
@@ -6,6 +6,8 @@
package rangefeed
import (
+ "context"
+
"github.com/cockroachdb/cockroach/pkg/kv/kvpb"
"github.com/cockroachdb/cockroach/pkg/roachpb"
)
@@ -25,15 +27,17 @@ type BufferedStream interface {
// similar to PerRangeEventSink but buffers events in BufferedSender before
// forwarding events to the underlying grpc stream.
type BufferedPerRangeEventSink struct {
+ ctx context.Context
rangeID roachpb.RangeID
streamID int64
wrapped *BufferedSender
}
func NewBufferedPerRangeEventSink(
- rangeID roachpb.RangeID, streamID int64, wrapped *BufferedSender,
+ ctx context.Context, rangeID roachpb.RangeID, streamID int64, wrapped *BufferedSender,
) *BufferedPerRangeEventSink {
return &BufferedPerRangeEventSink{
+ ctx: ctx,
rangeID: rangeID,
streamID: streamID,
wrapped: wrapped,
@@ -44,6 +48,10 @@ var _ kvpb.RangeFeedEventSink = (*BufferedPerRangeEventSink)(nil)
var _ Stream = (*BufferedPerRangeEventSink)(nil)
var _ BufferedStream = (*BufferedPerRangeEventSink)(nil)
+func (s *BufferedPerRangeEventSink) Context() context.Context {
+ return s.ctx
+}
+
// SendUnbufferedIsThreadSafe is a no-op declaration method. It is a contract
// that the SendUnbuffered method is thread-safe. Note that
// BufferedSender.SendBuffered and BufferedSender.SendUnbuffered are both
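
BufferedPerRangeEventSink now carries the stream's context in a struct field so it can satisfy the Context() accessor. Storing a context in a struct is usually discouraged, but wrappers whose lifetime matches the stream's are the accepted exception (gRPC's ServerStream works the same way). A sketch of the pattern, with hypothetical names:

package main

import (
    "context"
    "fmt"
)

// sink wraps a shared sender for one logical stream. Its lifetime equals
// the context's, which is what justifies keeping ctx in the struct; the
// patched BufferedPerRangeEventSink relies on the same reasoning.
type sink struct {
    ctx      context.Context
    streamID int64
}

func newSink(ctx context.Context, streamID int64) *sink {
    return &sink{ctx: ctx, streamID: streamID}
}

func (s *sink) Context() context.Context { return s.ctx }

func (s *sink) Send(ev string) error {
    if err := s.ctx.Err(); err != nil {
        return err
    }
    fmt.Printf("stream %d: %s\n", s.streamID, ev)
    return nil
}

func main() {
    ctx, cancel := context.WithCancel(context.Background())
    s := newSink(ctx, 42)
    _ = s.Send("value")
    cancel()
    fmt.Println(s.Send("late event")) // context canceled
}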
diff --git a/pkg/kv/kvserver/rangefeed/event_queue.go b/pkg/kv/kvserver/rangefeed/event_queue.go
deleted file mode 100644
index 89fd219795fa..000000000000
--- a/pkg/kv/kvserver/rangefeed/event_queue.go
+++ /dev/null
@@ -1,132 +0,0 @@
-// Copyright 2024 The Cockroach Authors.
-//
-// Use of this software is governed by the CockroachDB Software License
-// included in the /LICENSE file.
-
-package rangefeed
-
-import (
- "context"
- "sync"
-
- "github.com/cockroachdb/cockroach/pkg/kv/kvpb"
-)
-
-const eventQueueChunkSize = 4096
-
-type sharedMuxEvent struct {
- ev *kvpb.MuxRangeFeedEvent
- alloc *SharedBudgetAllocation
-}
-
-// queueChunk is a queue chunk of a fixed size which eventQueue uses to extend
-// its storage. Chunks are kept in the pool to reduce allocations.
-type queueChunk struct {
- data [eventQueueChunkSize]sharedMuxEvent
- nextChunk *queueChunk
-}
-
-var sharedQueueChunkSyncPool = sync.Pool{
- New: func() interface{} {
- return new(queueChunk)
- },
-}
-
-func getPooledQueueChunk() *queueChunk {
- return sharedQueueChunkSyncPool.Get().(*queueChunk)
-}
-
-func putPooledQueueChunk(e *queueChunk) {
- *e = queueChunk{}
- sharedQueueChunkSyncPool.Put(e)
-}
-
-// eventQueue stores sharedMuxEvents. Internally events are stored in
-// eventQueueChunkSize sized arrays that are added as needed and discarded once
-// reader and writers finish working with it.
-//
-// chunks are pooled in a sync.Pool to reduce the number of allocations.
-//
-// pushBack, popFront, len, and free run in constant time. drain runs in linear
-// time with respect to the number of elements in the queue. This structure is
-// not safe for concurrent use.
-//
-// TODO(ssd): Consider replacing with queue.Queue once that implementation can
-// use a pool.
-type eventQueue struct {
- first, last *queueChunk
- read, write int
- size int
-}
-
-func newEventQueue() *eventQueue {
- chunk := getPooledQueueChunk()
- return &eventQueue{
- first: chunk,
- last: chunk,
- }
-}
-
-func (q *eventQueue) pushBack(e sharedMuxEvent) {
- if q.write == eventQueueChunkSize {
- nextChunk := getPooledQueueChunk()
- q.last.nextChunk = nextChunk
- q.last = nextChunk
- q.write = 0
- }
- q.last.data[q.write] = e
- q.write++
- q.size++
-}
-
-func (q *eventQueue) popFront() (sharedMuxEvent, bool) {
- if q.size == 0 {
- return sharedMuxEvent{}, false
- }
- if q.read == eventQueueChunkSize {
- removed := q.first
- q.first = q.first.nextChunk
- putPooledQueueChunk(removed)
- q.read = 0
- }
- res := q.first.data[q.read]
- q.read++
- q.size--
- return res, true
-}
-
-// free drops references held by the queue.
-func (q *eventQueue) free() {
- q.first = nil
- q.last = nil
- q.read = 0
- q.write = 0
- q.size = 0
-}
-
-// drain releases all allocations held by the queue and then frees it.
-func (q *eventQueue) drain(ctx context.Context) {
- start := q.read
- for chunk := q.first; chunk != nil; {
- max := eventQueueChunkSize
- if chunk.nextChunk == nil {
- max = q.write
- }
- for i := start; i < max; i++ {
- chunk.data[i].alloc.Release(ctx)
- }
- next := chunk.nextChunk
- putPooledQueueChunk(chunk)
- chunk = next
- start = 0
- }
- q.free()
-}
-
-func (q *eventQueue) len() int64 {
- return int64(q.size)
-}
-
-func (q *eventQueue) empty() bool {
- return q.size == 0
-}
diff --git a/pkg/kv/kvserver/rangefeed/event_queue_test.go b/pkg/kv/kvserver/rangefeed/event_queue_test.go
deleted file mode 100644
index 2be1c348da67..000000000000
--- a/pkg/kv/kvserver/rangefeed/event_queue_test.go
+++ /dev/null
@@ -1,330 +0,0 @@
-// Copyright 2024 The Cockroach Authors.
-//
-// Use of this software is governed by the CockroachDB Software License
-// included in the /LICENSE file.
-
-package rangefeed
-
-import (
- "context"
- "fmt"
- "testing"
-
- "github.com/cockroachdb/cockroach/pkg/kv/kvpb"
- "github.com/cockroachdb/cockroach/pkg/settings/cluster"
- "github.com/cockroachdb/cockroach/pkg/util/ctxgroup"
- "github.com/cockroachdb/cockroach/pkg/util/mon"
- "github.com/cockroachdb/cockroach/pkg/util/randutil"
- "github.com/cockroachdb/cockroach/pkg/util/syncutil"
- "github.com/stretchr/testify/require"
-)
-
-func checkInvariants(t *testing.T, q *eventQueue) {
- if q.first == nil && q.last == nil {
- require.True(t, q.empty())
- } else if q.first != nil && q.last == nil {
- t.Fatal("head is nil but tail is non-nil")
- } else if q.first == nil && q.last != nil {
- t.Fatal("tail is nil but head is non-nil")
- } else {
- // The queue maintains an invariant that it contains no finished chunks.
- if q.first == q.last {
- require.Nil(t, q.first.nextChunk)
- }
- if q.empty() {
- require.Equal(t, q.first, q.last)
- require.Equal(t, q.read, q.write)
- } else {
- require.NotNil(t, q.first)
- require.NotNil(t, q.last)
- }
- }
-}
-
-func TestEventQueue(t *testing.T) {
- pushEmptyEvents := func(t *testing.T, q *eventQueue, count int) {
- for i := 0; i < count; i++ {
- require.Equal(t, i, q.size)
- q.pushBack(sharedMuxEvent{})
- checkInvariants(t, q)
- }
- }
-
- t.Run("basic operation: add one event and remove it", func(t *testing.T) {
- q := newEventQueue()
- defer q.free()
-
- require.True(t, q.empty())
- q.pushBack(sharedMuxEvent{})
- require.False(t, q.empty())
- _, ok := q.popFront()
- require.True(t, ok)
- require.True(t, q.empty())
- })
-
- t.Run("repeatedly popping empty queue should be fine", func(t *testing.T) {
- q := newEventQueue()
- defer q.free()
- _, ok := q.popFront()
- require.False(t, ok)
- _, ok = q.popFront()
- require.False(t, ok)
- require.True(t, q.empty())
- })
-
- t.Run("fill and empty queue", func(t *testing.T) {
- q := newEventQueue()
- eventCount := 10000
- pushEmptyEvents(t, q, eventCount)
- require.Equal(t, int64(eventCount), q.len())
- for eventCount != 0 {
- require.False(t, q.empty())
- checkInvariants(t, q)
- _, ok := q.popFront()
- require.True(t, ok)
- eventCount--
- }
- require.Equal(t, int64(0), q.len())
- require.True(t, q.empty())
- checkInvariants(t, q)
- _, ok := q.popFront()
- require.False(t, ok)
- })
-
- t.Run("free sets queue to nil", func(t *testing.T) {
- q := newEventQueue()
- pushEmptyEvents(t, q, eventQueueChunkSize*2)
- q.free()
- require.Nil(t, q.first)
- require.Nil(t, q.last)
- require.True(t, q.empty())
- })
-
- // Add events and assert they are consumed in fifo order.
- t.Run("queue is FIFO ordered", func(t *testing.T) {
- rng, _ := randutil.NewTestRand()
- q := newEventQueue()
- var lastPop int64 = -1
- var lastPush int64 = -1
- eventCount := 10000
- for eventCount > 0 {
- op := rng.Intn(5)
- if op < 3 {
- v := lastPush + 1
- q.pushBack(sharedMuxEvent{
- ev: &kvpb.MuxRangeFeedEvent{
- StreamID: v,
- },
- })
- lastPush++
- } else {
- e, ok := q.popFront()
- if !ok {
- require.Equal(t, lastPop, lastPush)
- require.True(t, q.empty())
- } else {
- require.Equal(t, lastPop+1, e.ev.StreamID)
- lastPop++
- eventCount--
- }
- }
- }
- })
-
- t.Run("drain releases allocations", func(t *testing.T) {
- ctx := context.Background()
- s := cluster.MakeTestingClusterSettings()
- m := mon.NewMonitor(mon.Options{
- Name: "rangefeed",
- Increment: 1,
- Settings: s,
- })
- eventCount := eventQueueChunkSize + 100
-
- m.Start(ctx, nil, mon.NewStandaloneBudget(int64(eventCount)))
- b := m.MakeBoundAccount()
- f := NewFeedBudget(&b, int64(eventCount), &s.SV)
- require.Equal(t, int64(0), b.Used())
- q := newEventQueue()
-
- for range eventCount {
- a, err := f.TryGet(ctx, 1)
- require.NoError(t, err)
- q.pushBack(sharedMuxEvent{
- alloc: a,
- })
- }
- q.drain(ctx)
- require.Equal(t, int64(0), b.Used())
- })
-}
-
-func BenchmarkEventQueue(b *testing.B) {
- b.ReportAllocs()
- events := eventQueueChunkSize * 2
- b.Run(fmt.Sprintf("pushBack/events=%d", events), func(b *testing.B) {
- for i := 0; i < b.N; i++ {
- q := newEventQueue()
- for range events {
- q.pushBack(sharedMuxEvent{})
- }
- q.free()
- }
- })
-
- b.Run(fmt.Sprintf("popFront/events=%d", events), func(b *testing.B) {
- q := newEventQueue()
- defer q.free()
- for i := 0; i < b.N; i++ {
- b.StopTimer()
- for range events {
- q.pushBack(sharedMuxEvent{})
- }
- b.StartTimer()
-
- for {
- _, ok := q.popFront()
- if !ok {
- break
- }
-
- }
- }
- })
-}
-
-func BenchmarkEventQueueChunkCreation(b *testing.B) {
- q := newEventQueue()
- defer q.free()
-
- evt := sharedMuxEvent{
- ev: &kvpb.MuxRangeFeedEvent{},
- }
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- // Create 10 pages of events
- for range eventQueueChunkSize * 10 {
- q.pushBack(evt)
- }
- // Drain
- for {
- _, ok := q.popFront()
- if !ok {
- break
- }
- }
- // Create 10 more
- for range eventQueueChunkSize * 10 {
- q.pushBack(evt)
- }
- }
-}
-
-// lockedQueue is how we expect callers may implement concurrent use
-// of eventQueue.
-type lockedQueue struct {
- syncutil.Mutex
- q *eventQueue
- notifyC chan struct{}
-}
-
-func (lq *lockedQueue) pushBack(e sharedMuxEvent) {
- lq.Lock()
- lq.q.pushBack(e)
- lq.Unlock()
- select {
- case lq.notifyC <- struct{}{}:
- default:
- }
-}
-
-func (lq *lockedQueue) pop() (sharedMuxEvent, bool) {
- lq.Lock()
- defer lq.Unlock()
- return lq.q.popFront()
-}
-
-func (lq *lockedQueue) popFront() (sharedMuxEvent, bool) {
- e, ok := lq.pop()
- if ok {
- return e, ok
- }
- <-lq.notifyC
- return lq.pop()
-}
-
-// chanQueue is a queue implementation using simple channels for
-// comparison purposes. Note that as it uses a fixed channel buffer,
-// it may block if the benchmark adds too much data before draining.
-type chanQueue struct {
- c chan sharedMuxEvent
-}
-
-func (c *chanQueue) pushBack(e sharedMuxEvent) {
- c.c <- e
-}
-
-func (c *chanQueue) popFront() (sharedMuxEvent, bool) {
- e, ok := <-c.c
- return e, ok
-}
-
-// BenchmarkEventQueueMPSC tries to compare this queue to a simple
-// channel for the MPSC use case.
-func BenchmarkEventQueueMPSC(b *testing.B) {
- ctx := context.Background()
- b.ReportAllocs()
-
- type queue interface {
- pushBack(e sharedMuxEvent)
- popFront() (sharedMuxEvent, bool)
- }
-
- eventsPerWorker := 10 * eventQueueChunkSize
- producerCount := 10
- runBench := func(b *testing.B, q queue) {
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- g := ctxgroup.WithContext(ctx)
- g.GoCtx(func(ctx context.Context) error {
- expectedEventCount := eventsPerWorker * producerCount
- eventCount := 0
- for {
- _, t := q.popFront()
- if t {
- eventCount++
- }
- if eventCount >= expectedEventCount {
- return nil
- }
- }
- })
-
- for range producerCount {
- g.GoCtx(func(ctx context.Context) error {
- for range eventsPerWorker {
- q.pushBack(sharedMuxEvent{})
- }
- return nil
- })
- }
-
- require.NoError(b, g.Wait())
- }
- }
- b.Run("eventQueue", func(b *testing.B) {
- q := &lockedQueue{
- q: newEventQueue(),
- notifyC: make(chan struct{}, 1),
- }
- defer q.q.free()
- runBench(b, q)
- })
- b.Run("chanQueue", func(b *testing.B) {
- q := &chanQueue{
- c: make(chan sharedMuxEvent, eventQueueChunkSize),
- }
- runBench(b, q)
- })
-}
diff --git a/pkg/kv/kvserver/rangefeed/processor.go b/pkg/kv/kvserver/rangefeed/processor.go
index 0338a24cad72..55e0413e9f09 100644
--- a/pkg/kv/kvserver/rangefeed/processor.go
+++ b/pkg/kv/kvserver/rangefeed/processor.go
@@ -7,6 +7,7 @@ package rangefeed
import (
"context"
+ "fmt"
"sync"
"time"
@@ -19,6 +20,7 @@ import (
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/stop"
+ "github.com/cockroachdb/errors"
)
var (
@@ -56,15 +58,13 @@ var (
)
)
-func newRetryErrBufferCapacityExceeded() error {
- return kvpb.NewRangeFeedRetryError(kvpb.RangeFeedRetryError_REASON_SLOW_CONSUMER)
-}
-
// newErrBufferCapacityExceeded creates an error that is returned to subscribers
// if the rangefeed processor is not able to keep up with the flow of incoming
// events and is forced to drop events in order to not block.
func newErrBufferCapacityExceeded() *kvpb.Error {
- return kvpb.NewError(newRetryErrBufferCapacityExceeded())
+ return kvpb.NewError(
+ kvpb.NewRangeFeedRetryError(kvpb.RangeFeedRetryError_REASON_SLOW_CONSUMER),
+ )
}
// Config encompasses the configuration required to create a Processor.
@@ -77,6 +77,13 @@ type Config struct {
Span roachpb.RSpan
TxnPusher TxnPusher
+ // PushTxnsInterval specifies the interval at which a Processor will push
+ // all transactions in the unresolvedIntentQueue that are above the age
+ // specified by PushTxnsAge.
+ //
+ // This option only applies to LegacyProcessor, since ScheduledProcessor
+ // relies on the store to push events to the scheduler to initiate
+ // transaction pushes.
+ PushTxnsInterval time.Duration
// PushTxnsAge specifies the age at which a Processor will begin to consider
// a transaction old enough to push.
PushTxnsAge time.Duration
@@ -112,10 +119,16 @@ func (sc *Config) SetDefaults() {
// Some tests don't set the TxnPusher, so we avoid setting a default push txn
// interval in such cases #121429.
if sc.TxnPusher == nil {
+ if sc.PushTxnsInterval != 0 {
+ panic("nil TxnPusher with non-zero PushTxnsInterval")
+ }
if sc.PushTxnsAge != 0 {
panic("nil TxnPusher with non-zero PushTxnsAge")
}
} else {
+ if sc.PushTxnsInterval == 0 {
+ sc.PushTxnsInterval = DefaultPushTxnsInterval
+ }
if sc.PushTxnsAge == 0 {
sc.PushTxnsAge = defaultPushTxnsAge
}
@@ -183,7 +196,6 @@ type Processor interface {
//
// NB: startTS is exclusive; the first possible event will be at startTS.Next().
Register(
- streamCtx context.Context,
span roachpb.RSpan,
startTS hlc.Timestamp, // exclusive
catchUpIter *CatchUpIterator,
@@ -236,7 +248,27 @@ type Processor interface {
func NewProcessor(cfg Config) Processor {
cfg.SetDefaults()
cfg.AmbientContext.AddLogTag("rangefeed", nil)
- return NewScheduledProcessor(cfg)
+ if cfg.Scheduler != nil {
+ return NewScheduledProcessor(cfg)
+ }
+ return NewLegacyProcessor(cfg)
+}
+
+type LegacyProcessor struct {
+ Config
+ reg registry
+ rts resolvedTimestamp
+
+ regC chan *bufferedRegistration
+ unregC chan *bufferedRegistration
+ lenReqC chan struct{}
+ lenResC chan int
+ filterReqC chan struct{}
+ filterResC chan *Filter
+ eventC chan *event
+ spanErrC chan spanErr
+ stopC chan *kvpb.Error
+ stoppedC chan struct{}
}
var eventSyncPool = sync.Pool{
@@ -293,13 +325,664 @@ type syncEvent struct {
testRegCatchupSpan *roachpb.Span
}
+// spanErr is an error across a key span that will disconnect overlapping
+// registrations.
+type spanErr struct {
+ span roachpb.Span
+ pErr *kvpb.Error
+}
+
// logicalOpMetadata is metadata associated with a logical Op.
type logicalOpMetadata struct {
omitInRangefeeds bool
originID uint32
}
+func NewLegacyProcessor(cfg Config) *LegacyProcessor {
+ p := &LegacyProcessor{
+ Config: cfg,
+ reg: makeRegistry(cfg.Metrics),
+ rts: makeResolvedTimestamp(cfg.Settings),
+
+ regC: make(chan *bufferedRegistration),
+ unregC: make(chan *bufferedRegistration),
+ lenReqC: make(chan struct{}),
+ lenResC: make(chan int),
+ filterReqC: make(chan struct{}),
+ filterResC: make(chan *Filter),
+ eventC: make(chan *event, cfg.EventChanCap),
+ spanErrC: make(chan spanErr),
+ stopC: make(chan *kvpb.Error, 1),
+ stoppedC: make(chan struct{}),
+ }
+ return p
+}
+
// IntentScannerConstructor is used to construct an IntentScanner. It
// should be called from underneath a stopper task to ensure that the
// engine has not been closed.
type IntentScannerConstructor func() IntentScanner
+
+// Start implements Processor interface.
+//
+// LegacyProcessor launches a goroutine to process rangefeed events and send
+// them to registrations.
+//
+// Note that to fulfill the newRtsIter contract, LegacyProcessor creates the
+// iterator at the start of its work loop, prior to firing off the async task.
+func (p *LegacyProcessor) Start(stopper *stop.Stopper, newRtsIter IntentScannerConstructor) error {
+ ctx := p.AnnotateCtx(context.Background())
+ if err := stopper.RunAsyncTask(ctx, "rangefeed.LegacyProcessor", func(ctx context.Context) {
+ p.Metrics.RangeFeedProcessorsGO.Inc(1)
+ defer p.Metrics.RangeFeedProcessorsGO.Dec(1)
+ p.run(ctx, p.RangeID, newRtsIter, stopper)
+ }); err != nil {
+ p.reg.DisconnectWithErr(ctx, all, kvpb.NewError(err))
+ close(p.stoppedC)
+ return err
+ }
+ return nil
+}
+
+// run is called from Start and runs the rangefeed.
+func (p *LegacyProcessor) run(
+ ctx context.Context,
+ _forStacks roachpb.RangeID,
+ rtsIterFunc IntentScannerConstructor,
+ stopper *stop.Stopper,
+) {
+ // Close the memory budget last, or there will be a period of time during
+ // which requests are still ongoing but will run into the closed budget,
+ // causing shutdown noise and busy retries.
+ // Closing the budget after stoppedC ensures that all other goroutines are
+ // (very close to being) shut down by the time the budget goes away.
+ defer p.MemBudget.Close(ctx)
+ defer close(p.stoppedC)
+ ctx, cancelOutputLoops := context.WithCancel(ctx)
+ defer cancelOutputLoops()
+
+ // Launch an async task to scan over the resolved timestamp iterator and
+ // initialize the unresolvedIntentQueue. Ignore error if quiescing.
+ if rtsIterFunc != nil {
+ rtsIter := rtsIterFunc()
+ initScan := newInitResolvedTSScan(p.Span, p, rtsIter)
+ err := stopper.RunAsyncTask(ctx, "rangefeed: init resolved ts", initScan.Run)
+ if err != nil {
+ initScan.Cancel()
+ }
+ } else {
+ p.initResolvedTS(ctx)
+ }
+
+ // txnPushTicker periodically pushes the transaction record of all
+ // unresolved intents that are above a certain age, helping to ensure
+ // that the resolved timestamp continues to make progress.
+ var txnPushTicker *time.Ticker
+ var txnPushTickerC <-chan time.Time
+ var txnPushAttemptC chan struct{}
+ if p.PushTxnsInterval > 0 {
+ txnPushTicker = time.NewTicker(p.PushTxnsInterval)
+ txnPushTickerC = txnPushTicker.C
+ defer txnPushTicker.Stop()
+ }
+
+ for {
+ select {
+
+ // Handle new registrations.
+ case r := <-p.regC:
+ if !p.Span.AsRawSpanWithNoLocals().Contains(r.span) {
+ log.Fatalf(ctx, "registration %s not in Processor's key range %v", r, p.Span)
+ }
+
+ // Add the new registration to the registry.
+ p.reg.Register(ctx, r)
+
+ // Publish an updated filter that includes the new registration.
+ p.filterResC <- p.reg.NewFilter()
+
+ // Immediately publish a checkpoint event to the registry. This will be the first event
+ // published to this registration after its initial catch-up scan completes. The resolved
+ // timestamp might be empty but the checkpoint event is still useful to indicate that the
+ // catch-up scan has completed. This allows clients to rely on stronger ordering semantics
+ // once they observe the first checkpoint event.
+ r.publish(ctx, p.newCheckpointEvent(), nil)
+
+ // Run an output loop for the registry.
+ runOutputLoop := func(ctx context.Context) {
+ r.runOutputLoop(ctx, p.RangeID)
+ select {
+ case p.unregC <- r:
+ if r.unreg != nil {
+ r.unreg()
+ }
+ case <-p.stoppedC:
+ }
+ }
+ if err := stopper.RunAsyncTask(ctx, "rangefeed: output loop", runOutputLoop); err != nil {
+ r.disconnect(kvpb.NewError(err))
+ p.reg.Unregister(ctx, r)
+ }
+
+ // Respond to unregistration requests; these come from registrations that
+ // encounter an error during their output loop.
+ case r := <-p.unregC:
+ p.reg.Unregister(ctx, r)
+
+ // Send errors to registrations overlapping the span and disconnect them.
+ // Requested via DisconnectSpanWithErr().
+ case e := <-p.spanErrC:
+ p.reg.DisconnectWithErr(ctx, e.span, e.pErr)
+
+ // Respond to requests about the processor goroutine state.
+ case <-p.lenReqC:
+ p.lenResC <- p.reg.Len()
+
+ // Respond to requests about which operations can be filtered before
+ // reaching the Processor.
+ case <-p.filterReqC:
+ p.filterResC <- p.reg.NewFilter()
+
+ // Transform and route events.
+ case e := <-p.eventC:
+ p.consumeEvent(ctx, e)
+ e.alloc.Release(ctx)
+ putPooledEvent(e)
+
+ // Check whether any unresolved intents need a push.
+ case <-txnPushTickerC:
+ // Don't perform transaction push attempts if disabled, if the resolved
+ // timestamp has not yet been initialized, or if we're not tracking any
+ // intents.
+ if !PushTxnsEnabled.Get(&p.Settings.SV) || !p.rts.IsInit() || p.rts.intentQ.Len() == 0 {
+ continue
+ }
+
+ now := p.Clock.Now()
+ before := now.Add(-p.PushTxnsAge.Nanoseconds(), 0)
+ oldTxns := p.rts.intentQ.Before(before)
+
+ if len(oldTxns) > 0 {
+ toPush := make([]enginepb.TxnMeta, len(oldTxns))
+ for i, txn := range oldTxns {
+ toPush[i] = txn.asTxnMeta()
+ }
+
+ // Set the ticker channel to nil so that it can't trigger a
+ // second concurrent push. Create a push attempt response
+ // channel that is closed when the push attempt completes.
+ txnPushTickerC = nil
+ txnPushAttemptC = make(chan struct{})
+
+ // Launch an async transaction push attempt that pushes the
+ // timestamp of all transactions beneath the push offset.
+ // Ignore error if quiescing.
+ pushTxns := newTxnPushAttempt(p.Settings, p.Span, p.TxnPusher, p, toPush, now, func() {
+ close(txnPushAttemptC)
+ })
+ err := stopper.RunAsyncTask(ctx, "rangefeed: pushing old txns", pushTxns.Run)
+ if err != nil {
+ pushTxns.Cancel()
+ }
+ }
+
+ // Update the resolved timestamp based on the push attempt.
+ case <-txnPushAttemptC:
+ // Reset the ticker channel so that it can trigger push attempts
+ // again. Set the push attempt channel back to nil.
+ txnPushTickerC = txnPushTicker.C
+ txnPushAttemptC = nil
+
+ // Close registrations and exit when signaled.
+ case pErr := <-p.stopC:
+ p.reg.DisconnectAllOnShutdown(ctx, pErr)
+ return
+
+ // Exit on stopper.
+ case <-stopper.ShouldQuiesce():
+ pErr := kvpb.NewError(&kvpb.NodeUnavailableError{})
+ p.reg.DisconnectAllOnShutdown(ctx, pErr)
+ return
+ }
+ }
+}
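
run's ticker handling uses a classic select trick: while a push attempt is in flight, txnPushTickerC is set to nil (a receive from a nil channel blocks forever, so that case goes dormant) and a completion channel takes its place. A distilled, runnable sketch:

package main

import (
    "fmt"
    "time"
)

func main() {
    ticker := time.NewTicker(20 * time.Millisecond)
    defer ticker.Stop()

    tickerC := ticker.C        // active: fires periodically
    var attemptC chan struct{} // nil until a push is in flight
    done := time.After(150 * time.Millisecond)

    for {
        select {
        case <-tickerC:
            // Disarm the ticker case (receives from a nil channel block
            // forever) and arm the completion case instead, so at most one
            // push attempt runs at a time.
            tickerC = nil
            attemptC = make(chan struct{})
            go func(c chan struct{}) {
                time.Sleep(30 * time.Millisecond) // simulated push attempt
                close(c)
            }(attemptC)
            fmt.Println("push started")
        case <-attemptC:
            // Re-arm the ticker and disarm the completion case.
            tickerC = ticker.C
            attemptC = nil
            fmt.Println("push finished")
        case <-done:
            return
        }
    }
}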
+
+// Stop implements Processor interface.
+func (p *LegacyProcessor) Stop() {
+ p.StopWithErr(nil)
+}
+
+// StopWithErr implements Processor interface.
+func (p *LegacyProcessor) StopWithErr(pErr *kvpb.Error) {
+ // Flush any remaining events before stopping.
+ p.syncEventC()
+ // Send the processor a stop signal.
+ p.sendStop(pErr)
+}
+
+// DisconnectSpanWithErr implements Processor interface.
+func (p *LegacyProcessor) DisconnectSpanWithErr(span roachpb.Span, pErr *kvpb.Error) {
+ select {
+ case p.spanErrC <- spanErr{span: span, pErr: pErr}:
+ case <-p.stoppedC:
+ // Already stopped. Do nothing.
+ }
+}
+
+func (p *LegacyProcessor) sendStop(pErr *kvpb.Error) {
+ select {
+ case p.stopC <- pErr:
+ // stopC has non-zero capacity so this should not block unless
+ // multiple callers attempt to stop the Processor concurrently.
+ case <-p.stoppedC:
+ // Already stopped. Do nothing.
+ }
+}
+
+// Register implements Processor interface.
+func (p *LegacyProcessor) Register(
+ span roachpb.RSpan,
+ startTS hlc.Timestamp,
+ catchUpIter *CatchUpIterator,
+ withDiff bool,
+ withFiltering bool,
+ withOmitRemote bool,
+ stream Stream,
+ disconnectFn func(),
+) (bool, *Filter) {
+ // Synchronize the event channel so that this registration doesn't see any
+ // events that were consumed before this registration was established.
+ // Instead, it should see those events during its catch-up scan.
+ p.syncEventC()
+
+ blockWhenFull := p.Config.EventChanTimeout == 0 // for testing
+ r := newBufferedRegistration(
+ span.AsRawSpanWithNoLocals(), startTS, catchUpIter, withDiff, withFiltering, withOmitRemote,
+ p.Config.EventChanCap, blockWhenFull, p.Metrics, stream, disconnectFn,
+ )
+ select {
+ case p.regC <- r:
+ // Wait for response.
+ return true, <-p.filterResC
+ case <-p.stoppedC:
+ return false, nil
+ }
+}
+
+// Len implements Processor interface.
+func (p *LegacyProcessor) Len() int {
+ // Ask the processor goroutine.
+ select {
+ case p.lenReqC <- struct{}{}:
+ // Wait for response.
+ return <-p.lenResC
+ case <-p.stoppedC:
+ return 0
+ }
+}
+
+// Filter implements Processor interface.
+func (p *LegacyProcessor) Filter() *Filter {
+ // Ask the processor goroutine.
+ select {
+ case p.filterReqC <- struct{}{}:
+ // Wait for response.
+ return <-p.filterResC
+ case <-p.stoppedC:
+ return nil
+ }
+}
+
+// ConsumeLogicalOps implements Processor interface.
+func (p *LegacyProcessor) ConsumeLogicalOps(
+ ctx context.Context, ops ...enginepb.MVCCLogicalOp,
+) bool {
+ if len(ops) == 0 {
+ return true
+ }
+ return p.sendEvent(ctx, event{ops: ops}, p.EventChanTimeout)
+}
+
+// ConsumeSSTable implements Processor interface.
+func (p *LegacyProcessor) ConsumeSSTable(
+ ctx context.Context, sst []byte, sstSpan roachpb.Span, writeTS hlc.Timestamp,
+) bool {
+ return p.sendEvent(ctx, event{sst: &sstEvent{sst, sstSpan, writeTS}}, p.EventChanTimeout)
+}
+
+// ForwardClosedTS implements Processor interface.
+func (p *LegacyProcessor) ForwardClosedTS(ctx context.Context, closedTS hlc.Timestamp) bool {
+ if closedTS.IsEmpty() {
+ return true
+ }
+ return p.sendEvent(ctx, event{ct: ctEvent{closedTS}}, p.EventChanTimeout)
+}
+
+// sendEvent informs the Processor of a new event. If a timeout is specified,
+// the method will wait for no longer than that duration before giving up,
+// shutting down the Processor, and returning false. A timeout of 0 means no
+// timeout.
+func (p *LegacyProcessor) sendEvent(ctx context.Context, e event, timeout time.Duration) bool {
+ // The code is a bit unwieldy because we try to avoid any allocations on the
+ // fast path, where we have enough budget and the outgoing channel is free.
+ // If not, we set up a timeout for acquiring budget and then reuse that
+ // timeout when inserting the value into the channel.
+ var alloc *SharedBudgetAllocation
+ if p.MemBudget != nil {
+ size := calculateDateEventSize(e)
+ if size > 0 {
+ var err error
+ // First we will try non-blocking fast path to allocate memory budget.
+ alloc, err = p.MemBudget.TryGet(ctx, size)
+ // If budget is already closed, then just let it through because processor
+ // is terminating.
+ if err != nil && !errors.Is(err, budgetClosedError) {
+ // Since we don't have enough budget, wait for allocations to be returned
+ // before failing.
+ if timeout > 0 {
+ var cancel context.CancelFunc
+ ctx, cancel = context.WithTimeout(ctx, timeout) // nolint:context
+ defer cancel()
+ // Reset the timeout here so that the subsequent channel write doesn't
+ // wait beyond what is already set up.
+ timeout = 0
+ }
+ p.Metrics.RangeFeedBudgetBlocked.Inc(1)
+ alloc, err = p.MemBudget.WaitAndGet(ctx, size)
+ }
+ if err != nil && !errors.Is(err, budgetClosedError) {
+ p.Metrics.RangeFeedBudgetExhausted.Inc(1)
+ p.sendStop(newErrBufferCapacityExceeded())
+ return false
+ }
+ // Always release the allocation pointer after sending, as Release is
+ // nil-safe. In the normal case its value is moved into the event; in case
+ // of allocation errors it is nil; in case of send errors it is non-nil,
+ // and this call ensures that the unused allocation is released.
+ defer func() {
+ alloc.Release(ctx)
+ }()
+ }
+ }
+ ev := getPooledEvent(e)
+ ev.alloc = alloc
+ if timeout == 0 {
+ // The timeout is zero if none was requested, or if one was already
+ // installed on the context during budget allocation. Just try to write,
+ // using the context as the timeout.
+ select {
+ case p.eventC <- ev:
+ // Reset allocation after successful posting to prevent deferred cleanup
+ // from freeing it (see comment on defer for explanation).
+ alloc = nil
+ case <-p.stoppedC:
+ // Already stopped. Do nothing.
+ case <-ctx.Done():
+ p.sendStop(newErrBufferCapacityExceeded())
+ return false
+ }
+ } else {
+ // First try the fast path without blocking and without creating any
+ // contexts, in case the channel has capacity.
+ select {
+ case p.eventC <- ev:
+ // Reset allocation after successful posting to prevent deferred cleanup
+ // from freeing it (see comment on defer for explanation).
+ alloc = nil
+ case <-p.stoppedC:
+ // Already stopped. Do nothing.
+ default:
+ // The fast path failed since the channel is out of capacity. Wait for
+ // slots to clear up, using a context timeout.
+ var cancel context.CancelFunc
+ ctx, cancel = context.WithTimeout(ctx, timeout) // nolint:context
+ defer cancel()
+ select {
+ case p.eventC <- ev:
+ // Reset allocation after successful posting to prevent deferred cleanup
+ // from freeing it (see comment on defer for explanation).
+ alloc = nil
+ case <-p.stoppedC:
+ // Already stopped. Do nothing.
+ case <-ctx.Done():
+ // Sending on the eventC channel would have blocked.
+ // Instead, tear down the processor and return immediately.
+ p.sendStop(newErrBufferCapacityExceeded())
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// setResolvedTSInitialized informs the Processor that its resolved timestamp has
+// all the information it needs to be considered initialized.
+func (p *LegacyProcessor) setResolvedTSInitialized(ctx context.Context) {
+ p.sendEvent(ctx, event{initRTS: true}, 0)
+}
+
+// syncEventC synchronizes access to the Processor goroutine, allowing the
+// caller to establish causality with actions taken by the Processor goroutine.
+// It does so by flushing the event pipeline.
+func (p *LegacyProcessor) syncEventC() {
+ p.syncEventCWithEvent(&syncEvent{c: make(chan struct{})})
+}
+
+// syncEventCWithEvent sends a sync event and waits on its channel. Exposed to
+// allow special test syncEvents that contain a span to be sent.
+func (p *LegacyProcessor) syncEventCWithEvent(se *syncEvent) {
+ ev := getPooledEvent(event{sync: se})
+ select {
+ case p.eventC <- ev:
+ select {
+ case <-se.c:
+ // Synchronized.
+ case <-p.stoppedC:
+ // Already stopped. Do nothing.
+ }
+ case <-p.stoppedC:
+ // Already stopped. Return event back to the pool.
+ putPooledEvent(ev)
+ }
+}
+
+func (p *LegacyProcessor) consumeEvent(ctx context.Context, e *event) {
+ switch {
+ case e.ops != nil:
+ p.consumeLogicalOps(ctx, e.ops, e.alloc)
+ case !e.ct.IsEmpty():
+ p.forwardClosedTS(ctx, e.ct.Timestamp)
+ case bool(e.initRTS):
+ p.initResolvedTS(ctx)
+ case e.sst != nil:
+ p.consumeSSTable(ctx, e.sst.data, e.sst.span, e.sst.ts, e.alloc)
+ case e.sync != nil:
+ if e.sync.testRegCatchupSpan != nil {
+ if err := p.reg.waitForCaughtUp(ctx, *e.sync.testRegCatchupSpan); err != nil {
+ log.Errorf(
+ ctx,
+ "error waiting for registries to catch up during test, results might be impacted: %s",
+ err,
+ )
+ }
+ }
+ close(e.sync.c)
+ default:
+ panic(fmt.Sprintf("missing event variant: %+v", e))
+ }
+}
+
+func (p *LegacyProcessor) consumeLogicalOps(
+ ctx context.Context, ops []enginepb.MVCCLogicalOp, alloc *SharedBudgetAllocation,
+) {
+ for _, op := range ops {
+ // Publish RangeFeedValue updates, if necessary.
+ switch t := op.GetValue().(type) {
+ // OmitInRangefeeds is relevant only for transactional writes, so it's
+ // propagated only in the case of a MVCCCommitIntentOp and
+ // MVCCWriteValueOp (could be the result of a 1PC write).
+ case *enginepb.MVCCWriteValueOp:
+ // Publish the new value directly.
+ p.publishValue(ctx, t.Key, t.Timestamp, t.Value, t.PrevValue, logicalOpMetadata{omitInRangefeeds: t.OmitInRangefeeds, originID: t.OriginID}, alloc)
+
+ case *enginepb.MVCCDeleteRangeOp:
+ // Publish the range deletion directly.
+ p.publishDeleteRange(ctx, t.StartKey, t.EndKey, t.Timestamp, alloc)
+
+ case *enginepb.MVCCWriteIntentOp:
+ // No updates to publish.
+
+ case *enginepb.MVCCUpdateIntentOp:
+ // No updates to publish.
+
+ case *enginepb.MVCCCommitIntentOp:
+ // Publish the newly committed value.
+ p.publishValue(ctx, t.Key, t.Timestamp, t.Value, t.PrevValue, logicalOpMetadata{omitInRangefeeds: t.OmitInRangefeeds, originID: t.OriginID}, alloc)
+
+ case *enginepb.MVCCAbortIntentOp:
+ // No updates to publish.
+
+ case *enginepb.MVCCAbortTxnOp:
+ // No updates to publish.
+
+ default:
+ panic(errors.AssertionFailedf("unknown logical op %T", t))
+ }
+
+ // Determine whether the operation caused the resolved timestamp to
+ // move forward. If so, publish a RangeFeedCheckpoint notification.
+ if p.rts.ConsumeLogicalOp(ctx, op) {
+ p.publishCheckpoint(ctx)
+ }
+ }
+}
+
+func (p *LegacyProcessor) consumeSSTable(
+ ctx context.Context,
+ sst []byte,
+ sstSpan roachpb.Span,
+ sstWTS hlc.Timestamp,
+ alloc *SharedBudgetAllocation,
+) {
+ p.publishSSTable(ctx, sst, sstSpan, sstWTS, alloc)
+}
+
+func (p *LegacyProcessor) forwardClosedTS(ctx context.Context, newClosedTS hlc.Timestamp) {
+ if p.rts.ForwardClosedTS(ctx, newClosedTS) {
+ p.publishCheckpoint(ctx)
+ }
+}
+
+func (p *LegacyProcessor) initResolvedTS(ctx context.Context) {
+ if p.rts.Init(ctx) {
+ p.publishCheckpoint(ctx)
+ }
+}
+
+func (p *LegacyProcessor) publishValue(
+ ctx context.Context,
+ key roachpb.Key,
+ timestamp hlc.Timestamp,
+ value, prevValue []byte,
+ valueMetadata logicalOpMetadata,
+ alloc *SharedBudgetAllocation,
+) {
+ if !p.Span.ContainsKey(roachpb.RKey(key)) {
+ log.Fatalf(ctx, "key %v not in Processor's key range %v", key, p.Span)
+ }
+
+ var prevVal roachpb.Value
+ if prevValue != nil {
+ prevVal.RawBytes = prevValue
+ }
+ var event kvpb.RangeFeedEvent
+ event.MustSetValue(&kvpb.RangeFeedValue{
+ Key: key,
+ Value: roachpb.Value{
+ RawBytes: value,
+ Timestamp: timestamp,
+ },
+ PrevValue: prevVal,
+ })
+ p.reg.PublishToOverlapping(ctx, roachpb.Span{Key: key}, &event, valueMetadata, alloc)
+}
+
+func (p *LegacyProcessor) publishDeleteRange(
+ ctx context.Context,
+ startKey, endKey roachpb.Key,
+ timestamp hlc.Timestamp,
+ alloc *SharedBudgetAllocation,
+) {
+ span := roachpb.Span{Key: startKey, EndKey: endKey}
+ if !p.Span.ContainsKeyRange(roachpb.RKey(startKey), roachpb.RKey(endKey)) {
+ log.Fatalf(ctx, "span %s not in Processor's key range %v", span, p.Span)
+ }
+
+ var event kvpb.RangeFeedEvent
+ event.MustSetValue(&kvpb.RangeFeedDeleteRange{
+ Span: span,
+ Timestamp: timestamp,
+ })
+ p.reg.PublishToOverlapping(ctx, span, &event, logicalOpMetadata{}, alloc)
+}
+
+func (p *LegacyProcessor) publishSSTable(
+ ctx context.Context,
+ sst []byte,
+ sstSpan roachpb.Span,
+ sstWTS hlc.Timestamp,
+ alloc *SharedBudgetAllocation,
+) {
+ if sstSpan.Equal(roachpb.Span{}) {
+ panic(errors.AssertionFailedf("received SSTable without span"))
+ }
+ if sstWTS.IsEmpty() {
+ panic(errors.AssertionFailedf("received SSTable without write timestamp"))
+ }
+ p.reg.PublishToOverlapping(ctx, sstSpan, &kvpb.RangeFeedEvent{
+ SST: &kvpb.RangeFeedSSTable{
+ Data: sst,
+ Span: sstSpan,
+ WriteTS: sstWTS,
+ },
+ }, logicalOpMetadata{}, alloc)
+}
+
+func (p *LegacyProcessor) publishCheckpoint(ctx context.Context) {
+ // TODO(nvanbenschoten): persist resolvedTimestamp. Give Processor a client.DB.
+ // TODO(nvanbenschoten): rate limit these? send them periodically?
+
+ event := p.newCheckpointEvent()
+ p.reg.PublishToOverlapping(ctx, all, event, logicalOpMetadata{}, nil)
+}
+
+func (p *LegacyProcessor) newCheckpointEvent() *kvpb.RangeFeedEvent {
+ // Create a RangeFeedCheckpoint over the Processor's entire span. Each
+ // individual registration will trim this down to just the key span that
+ // it is listening on in registration.maybeStripEvent before publishing.
+ var event kvpb.RangeFeedEvent
+ event.MustSetValue(&kvpb.RangeFeedCheckpoint{
+ Span: p.Span.AsRawSpanWithNoLocals(),
+ ResolvedTS: p.rts.Get(),
+ })
+ return &event
+}
+
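The trimming mentioned in the comment above amounts to intersecting the processor-wide checkpoint span with each registration's span. A hedged sketch of that step (intersect is an illustrative helper, not the registry's actual code, and it ignores local-key and empty-key edge cases):

package main

import "fmt"

type span struct{ key, endKey string }

// intersect returns the overlap of two spans, or false if they are disjoint.
func intersect(a, b span) (span, bool) {
	lo, hi := a.key, a.endKey
	if b.key > lo {
		lo = b.key
	}
	if b.endKey < hi {
		hi = b.endKey
	}
	if lo >= hi {
		return span{}, false
	}
	return span{lo, hi}, true
}

func main() {
	processorWide := span{"a", "z"} // the checkpoint span published above
	registration := span{"c", "m"} // what one registration listens on
	if s, ok := intersect(processorWide, registration); ok {
		fmt.Printf("checkpoint trimmed to [%s,%s)\n", s.key, s.endKey)
	}
}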
+// ID implements Processor interface.
+func (p *LegacyProcessor) ID() int64 {
+ return 0
+}
+
+// calculateDateEventSize returns the estimated size of an event that contains
+// actual data. We only account for logical ops and SSTs; those events come
+// from raft and are budgeted. Other events come from processor jobs and only
+// update timestamps, so we don't count them: they are expected to be small,
+// and ignoring them avoids the complexity of multiple producers drawing from
+// the budget.
+func calculateDateEventSize(e event) int64 {
+ var size int64
+ for _, op := range e.ops {
+ size += int64(op.Size())
+ }
+ if e.sst != nil {
+ size += int64(len(e.sst.data))
+ }
+ return size
+}
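A sketch of how such a size estimate might feed a budget: only raft-originated payloads (logical ops, SSTable bytes) are charged, so timestamp-only events can never exhaust it. Toy types throughout, assuming a simple acquire/deny budget rather than the real SharedBudgetAllocation:

package main

import (
	"errors"
	"fmt"
)

type event struct {
	ops []int  // toy: sizes of individual logical ops
	sst []byte // raw SSTable bytes, if any
}

// dataSize mirrors calculateDateEventSize: only ops and SST bytes count, so
// timestamp-only events are deliberately free.
func dataSize(e event) int64 {
	var size int64
	for _, op := range e.ops {
		size += int64(op)
	}
	size += int64(len(e.sst))
	return size
}

type budget struct{ remaining int64 }

func (b *budget) acquire(n int64) error {
	if n > b.remaining {
		return errors.New("budget exhausted")
	}
	b.remaining -= n
	return nil
}

func main() {
	b := &budget{remaining: 100}
	e := event{ops: []int{30, 20}, sst: []byte("abcde")}
	if err := b.acquire(dataSize(e)); err != nil {
		fmt.Println("reject:", err)
		return
	}
	fmt.Println("admitted event of size", dataSize(e), "; remaining", b.remaining)
}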
diff --git a/pkg/kv/kvserver/rangefeed/processor_helpers_test.go b/pkg/kv/kvserver/rangefeed/processor_helpers_test.go
index 88ca84a8a3fc..848879018188 100644
--- a/pkg/kv/kvserver/rangefeed/processor_helpers_test.go
+++ b/pkg/kv/kvserver/rangefeed/processor_helpers_test.go
@@ -233,40 +233,41 @@ func (h *processorTestHelper) triggerTxnPushUntilPushed(t *testing.T, pushedC <-
}
}
-type rangefeedTestType bool
+type procType bool
-var (
- scheduledProcessorWithUnbufferedSender rangefeedTestType
+const (
+ legacyProcessor procType = false
+ schedulerProcessor = true
)
-var testTypes = []rangefeedTestType{
- scheduledProcessorWithUnbufferedSender,
-}
+var testTypes = []procType{legacyProcessor, schedulerProcessor}
-// NB: When adding new types, please keep make sure existing
-// benchmarks will keep their old name.
-func (t rangefeedTestType) String() string {
- return "scheduled"
+func (t procType) String() string {
+ if t {
+ return "scheduler"
+ }
+ return "legacy"
}
type testConfig struct {
Config
- isc IntentScannerConstructor
- feedType rangefeedTestType
+ useScheduler bool
+ isc IntentScannerConstructor
}
type option func(*testConfig)
func withPusher(txnPusher TxnPusher) option {
return func(config *testConfig) {
+ config.PushTxnsInterval = 10 * time.Millisecond
config.PushTxnsAge = 50 * time.Millisecond
config.TxnPusher = txnPusher
}
}
-func withRangefeedTestType(t rangefeedTestType) option {
+func withProcType(t procType) option {
return func(config *testConfig) {
- config.feedType = t
+ config.useScheduler = bool(t)
}
}
@@ -324,6 +325,7 @@ func withSettings(st *cluster.Settings) option {
func withPushTxnsIntervalAge(interval, age time.Duration) option {
return func(config *testConfig) {
+ config.PushTxnsInterval = interval
config.PushTxnsAge = age
}
}
@@ -399,25 +401,34 @@ func newTestProcessor(
for _, o := range opts {
o(&cfg)
}
- sch := NewScheduler(SchedulerConfig{
- Workers: 1,
- PriorityWorkers: 1,
- Metrics: NewSchedulerMetrics(time.Second),
- })
- require.NoError(t, sch.Start(context.Background(), stopper))
- cfg.Scheduler = sch
- // Also create a dummy priority processor to populate priorityIDs for
- // BenchmarkRangefeed. It should never be called.
- noop := func(e processorEventType) processorEventType {
- if e != Stopped {
- t.Errorf("unexpected event %s for noop priority processor", e)
+ if cfg.useScheduler {
+ sch := NewScheduler(SchedulerConfig{
+ Workers: 1,
+ PriorityWorkers: 1,
+ Metrics: NewSchedulerMetrics(time.Second),
+ })
+ require.NoError(t, sch.Start(context.Background(), stopper))
+ cfg.Scheduler = sch
+ // Also create a dummy priority processor to populate priorityIDs for
+ // BenchmarkRangefeed. It should never be called.
+ noop := func(e processorEventType) processorEventType {
+ if e != Stopped {
+ t.Errorf("unexpected event %s for noop priority processor", e)
+ }
+ return 0
}
- return 0
+ require.NoError(t, sch.register(9, noop, true /* priority */))
}
- require.NoError(t, sch.register(9, noop, true /* priority */))
s := NewProcessor(cfg.Config)
h := processorTestHelper{}
switch p := s.(type) {
+ case *LegacyProcessor:
+ h.rts = &p.rts
+ h.span = p.Span
+ h.syncEventC = p.syncEventC
+ h.sendSpanSync = func(span *roachpb.Span) {
+ p.syncEventCWithEvent(&syncEvent{c: make(chan struct{}), testRegCatchupSpan: span})
+ }
case *ScheduledProcessor:
h.rts = &p.rts
h.span = p.Span
diff --git a/pkg/kv/kvserver/rangefeed/processor_test.go b/pkg/kv/kvserver/rangefeed/processor_test.go
index ad87bf37c100..92d7f6f282b6 100644
--- a/pkg/kv/kvserver/rangefeed/processor_test.go
+++ b/pkg/kv/kvserver/rangefeed/processor_test.go
@@ -35,8 +35,8 @@ import (
func TestProcessorBasic(t *testing.T) {
defer leaktest.AfterTest(t)()
- testutils.RunValues(t, "feed type", testTypes, func(t *testing.T, rt rangefeedTestType) {
- p, h, stopper := newTestProcessor(t, withRangefeedTestType(rt))
+ testutils.RunValues(t, "proc type", testTypes, func(t *testing.T, pt procType) {
+ p, h, stopper := newTestProcessor(t, withProcType(pt))
ctx := context.Background()
defer stopper.Stop(ctx)
@@ -62,7 +62,6 @@ func TestProcessorBasic(t *testing.T) {
// Add a registration.
r1Stream := newTestStream()
r1OK, r1Filter := p.Register(
- r1Stream.ctx,
roachpb.RSpan{Key: roachpb.RKey("a"), EndKey: roachpb.RKey("m")},
hlc.Timestamp{WallTime: 1},
nil, /* catchUpIter */
@@ -197,7 +196,6 @@ func TestProcessorBasic(t *testing.T) {
// Add another registration with withDiff = true and withFiltering = true.
r2Stream := newTestStream()
r2OK, r1And2Filter := p.Register(
- r2Stream.ctx,
roachpb.RSpan{Key: roachpb.RKey("c"), EndKey: roachpb.RKey("z")},
hlc.Timestamp{WallTime: 1},
nil, /* catchUpIter */
@@ -299,53 +297,36 @@ func TestProcessorBasic(t *testing.T) {
}
require.Equal(t, valEvent3, r1Stream.Events())
// r2Stream should not see the event.
+
// Cancel the first registration.
r1Stream.Cancel()
require.NotNil(t, r1Stream.WaitForError(t))
- // Disconnect the registration via Disconnect should work.
- r3Stream := newTestStream()
- r30K, _ := p.Register(
- r3Stream.ctx,
- roachpb.RSpan{Key: roachpb.RKey("c"), EndKey: roachpb.RKey("z")},
- hlc.Timestamp{WallTime: 1},
- nil, /* catchUpIter */
- false, /* withDiff */
- false, /* withFiltering */
- false, /* withOmitRemote */
- r3Stream,
- func() {},
- )
- require.True(t, r30K)
- r3Stream.Disconnect(kvpb.NewError(fmt.Errorf("disconnection error")))
- require.NotNil(t, r3Stream.WaitForError(t))
-
// Stop the processor with an error.
pErr := kvpb.NewErrorf("stop err")
p.StopWithErr(pErr)
require.NotNil(t, r2Stream.WaitForError(t))
// Adding another registration should fail.
- r4Stream := newTestStream()
- r4OK, _ := p.Register(
- r3Stream.ctx,
+ r3Stream := newTestStream()
+ r3OK, _ := p.Register(
roachpb.RSpan{Key: roachpb.RKey("c"), EndKey: roachpb.RKey("z")},
hlc.Timestamp{WallTime: 1},
nil, /* catchUpIter */
false, /* withDiff */
false, /* withFiltering */
false, /* withOmitRemote */
- r4Stream,
+ r3Stream,
func() {},
)
- require.False(t, r4OK)
+ require.False(t, r3OK)
})
}
func TestProcessorOmitRemote(t *testing.T) {
defer leaktest.AfterTest(t)()
- testutils.RunValues(t, "feed type", testTypes, func(t *testing.T, rt rangefeedTestType) {
- p, h, stopper := newTestProcessor(t, withRangefeedTestType(rt))
+ testutils.RunValues(t, "proc type", testTypes, func(t *testing.T, pt procType) {
+ p, h, stopper := newTestProcessor(t, withProcType(pt))
ctx := context.Background()
defer stopper.Stop(ctx)
@@ -354,7 +335,6 @@ func TestProcessorOmitRemote(t *testing.T) {
// Add a registration.
r1Stream := newTestStream()
r1OK, _ := p.Register(
- r1Stream.ctx,
roachpb.RSpan{Key: roachpb.RKey("a"), EndKey: roachpb.RKey("m")},
hlc.Timestamp{WallTime: 1},
nil, /* catchUpIter */
@@ -380,7 +360,6 @@ func TestProcessorOmitRemote(t *testing.T) {
// Add another registration with withOmitRemote = true.
r2Stream := newTestStream()
r2OK, _ := p.Register(
- r2Stream.ctx,
roachpb.RSpan{Key: roachpb.RKey("a"), EndKey: roachpb.RKey("m")},
hlc.Timestamp{WallTime: 1},
nil, /* catchUpIter */
@@ -426,15 +405,14 @@ func TestProcessorOmitRemote(t *testing.T) {
func TestProcessorSlowConsumer(t *testing.T) {
defer leaktest.AfterTest(t)()
- testutils.RunValues(t, "feed type", testTypes, func(t *testing.T, rt rangefeedTestType) {
- p, h, stopper := newTestProcessor(t, withRangefeedTestType(rt))
+ testutils.RunValues(t, "proc type", testTypes, func(t *testing.T, pt procType) {
+ p, h, stopper := newTestProcessor(t, withProcType(pt))
ctx := context.Background()
defer stopper.Stop(ctx)
// Add a registration.
r1Stream := newTestStream()
_, _ = p.Register(
- r1Stream.ctx,
roachpb.RSpan{Key: roachpb.RKey("a"), EndKey: roachpb.RKey("m")},
hlc.Timestamp{WallTime: 1},
nil, /* catchUpIter */
@@ -446,7 +424,6 @@ func TestProcessorSlowConsumer(t *testing.T) {
)
r2Stream := newTestStream()
p.Register(
- r2Stream.ctx,
roachpb.RSpan{Key: roachpb.RKey("a"), EndKey: roachpb.RKey("z")},
hlc.Timestamp{WallTime: 1},
nil, /* catchUpIter */
@@ -531,19 +508,18 @@ func TestProcessorSlowConsumer(t *testing.T) {
// result of budget exhaustion.
func TestProcessorMemoryBudgetExceeded(t *testing.T) {
defer leaktest.AfterTest(t)()
- testutils.RunValues(t, "feed type", testTypes, func(t *testing.T, rt rangefeedTestType) {
+ testutils.RunValues(t, "proc type", testTypes, func(t *testing.T, pt procType) {
fb := newTestBudget(40)
m := NewMetrics()
p, h, stopper := newTestProcessor(t, withBudget(fb), withChanTimeout(time.Millisecond),
- withMetrics(m), withRangefeedTestType(rt))
+ withMetrics(m), withProcType(pt))
ctx := context.Background()
defer stopper.Stop(ctx)
// Add a registration.
r1Stream := newTestStream()
_, _ = p.Register(
- r1Stream.ctx,
roachpb.RSpan{Key: roachpb.RKey("a"), EndKey: roachpb.RKey("m")},
hlc.Timestamp{WallTime: 1},
nil, /* catchUpIter */
@@ -589,17 +565,16 @@ func TestProcessorMemoryBudgetExceeded(t *testing.T) {
// TestProcessorMemoryBudgetReleased that memory budget is correctly released.
func TestProcessorMemoryBudgetReleased(t *testing.T) {
defer leaktest.AfterTest(t)()
- testutils.RunValues(t, "feed type", testTypes, func(t *testing.T, rt rangefeedTestType) {
+ testutils.RunValues(t, "proc type", testTypes, func(t *testing.T, pt procType) {
fb := newTestBudget(250)
p, h, stopper := newTestProcessor(t, withBudget(fb), withChanTimeout(15*time.Minute),
- withRangefeedTestType(rt))
+ withProcType(pt))
ctx := context.Background()
defer stopper.Stop(ctx)
// Add a registration.
r1Stream := newTestStream()
p.Register(
- r1Stream.ctx,
roachpb.RSpan{Key: roachpb.RKey("a"), EndKey: roachpb.RKey("m")},
hlc.Timestamp{WallTime: 1},
nil, /* catchUpIter */
@@ -640,7 +615,7 @@ func TestProcessorMemoryBudgetReleased(t *testing.T) {
func TestProcessorInitializeResolvedTimestamp(t *testing.T) {
defer leaktest.AfterTest(t)()
- testutils.RunValues(t, "feed type", testTypes, func(t *testing.T, rt rangefeedTestType) {
+ testutils.RunValues(t, "proc type", testTypes, func(t *testing.T, pt procType) {
txn1 := makeTxn("txn1", uuid.MakeV4(), isolation.Serializable, hlc.Timestamp{})
txn2 := makeTxn("txn2", uuid.MakeV4(), isolation.Serializable, hlc.Timestamp{})
txnWithTs := func(txn roachpb.Transaction, ts int64) *roachpb.Transaction {
@@ -670,7 +645,7 @@ func TestProcessorInitializeResolvedTimestamp(t *testing.T) {
require.NoError(t, err, "failed to prepare test data")
defer cleanup()
- p, h, stopper := newTestProcessor(t, withRtsScanner(scanner), withRangefeedTestType(rt))
+ p, h, stopper := newTestProcessor(t, withRtsScanner(scanner), withProcType(pt))
ctx := context.Background()
defer stopper.Stop(ctx)
@@ -681,7 +656,6 @@ func TestProcessorInitializeResolvedTimestamp(t *testing.T) {
// Add a registration.
r1Stream := newTestStream()
p.Register(
- r1Stream.ctx,
roachpb.RSpan{Key: roachpb.RKey("a"), EndKey: roachpb.RKey("m")},
hlc.Timestamp{WallTime: 1},
nil, /* catchUpIter */
@@ -741,7 +715,7 @@ func TestProcessorInitializeResolvedTimestamp(t *testing.T) {
func TestProcessorTxnPushAttempt(t *testing.T) {
defer leaktest.AfterTest(t)()
- testutils.RunValues(t, "feed type", testTypes, func(t *testing.T, rt rangefeedTestType) {
+ testutils.RunValues(t, "proc type", testTypes, func(t *testing.T, pt procType) {
ts10 := hlc.Timestamp{WallTime: 10}
ts20 := hlc.Timestamp{WallTime: 20}
ts25 := hlc.Timestamp{WallTime: 25}
@@ -843,7 +817,7 @@ func TestProcessorTxnPushAttempt(t *testing.T) {
return nil
})
- p, h, stopper := newTestProcessor(t, withPusher(&tp), withRangefeedTestType(rt))
+ p, h, stopper := newTestProcessor(t, withPusher(&tp), withProcType(pt))
ctx := context.Background()
defer stopper.Stop(ctx)
@@ -982,12 +956,12 @@ func TestProcessorTxnPushDisabled(t *testing.T) {
// not then it would be possible for them to deadlock.
func TestProcessorConcurrentStop(t *testing.T) {
defer leaktest.AfterTest(t)()
- testutils.RunValues(t, "feed type", testTypes, func(t *testing.T, rt rangefeedTestType) {
+ testutils.RunValues(t, "proc type", testTypes, func(t *testing.T, pt procType) {
ctx := context.Background()
const trials = 10
for i := 0; i < trials; i++ {
- p, h, stopper := newTestProcessor(t, withRangefeedTestType(rt))
+ p, h, stopper := newTestProcessor(t, withProcType(pt))
var wg sync.WaitGroup
wg.Add(6)
@@ -995,7 +969,7 @@ func TestProcessorConcurrentStop(t *testing.T) {
defer wg.Done()
runtime.Gosched()
s := newTestStream()
- p.Register(s.ctx, h.span, hlc.Timestamp{}, nil, /* catchUpIter */
+ p.Register(h.span, hlc.Timestamp{}, nil, /* catchUpIter */
false /* withDiff */, false /* withFiltering */, false /* withOmitRemote */, s, func() {})
}()
go func() {
@@ -1033,9 +1007,9 @@ func TestProcessorConcurrentStop(t *testing.T) {
// observes only operations that are consumed after it has registered.
func TestProcessorRegistrationObservesOnlyNewEvents(t *testing.T) {
defer leaktest.AfterTest(t)()
- testutils.RunValues(t, "feed type", testTypes, func(t *testing.T, rt rangefeedTestType) {
+ testutils.RunValues(t, "proc type", testTypes, func(t *testing.T, pt procType) {
- p, h, stopper := newTestProcessor(t, withRangefeedTestType(rt))
+ p, h, stopper := newTestProcessor(t, withProcType(pt))
ctx := context.Background()
defer stopper.Stop(ctx)
@@ -1067,7 +1041,7 @@ func TestProcessorRegistrationObservesOnlyNewEvents(t *testing.T) {
 // operation it should see is firstIdx.
s := newTestStream()
regs[s] = firstIdx
- p.Register(s.ctx, h.span, hlc.Timestamp{}, nil, /* catchUpIter */
+ p.Register(h.span, hlc.Timestamp{}, nil, /* catchUpIter */
false /* withDiff */, false /* withFiltering */, false /* withOmitRemote */, s, func() {})
regDone <- struct{}{}
}
@@ -1099,7 +1073,7 @@ func TestBudgetReleaseOnProcessorStop(t *testing.T) {
// as sync events used to flush queues.
const channelCapacity = totalEvents/2 + 10
- testutils.RunValues(t, "feed type", testTypes, func(t *testing.T, rt rangefeedTestType) {
+ testutils.RunValues(t, "proc type", testTypes, func(t *testing.T, pt procType) {
s := cluster.MakeTestingClusterSettings()
m := mon.NewMonitor(mon.Options{
Name: "rangefeed",
@@ -1113,7 +1087,7 @@ func TestBudgetReleaseOnProcessorStop(t *testing.T) {
fb := NewFeedBudget(&b, 0, &s.SV)
p, h, stopper := newTestProcessor(t, withBudget(fb), withChanCap(channelCapacity),
- withEventTimeout(100*time.Millisecond), withRangefeedTestType(rt))
+ withEventTimeout(100*time.Millisecond), withProcType(pt))
ctx := context.Background()
defer stopper.Stop(ctx)
@@ -1121,7 +1095,6 @@ func TestBudgetReleaseOnProcessorStop(t *testing.T) {
rStream := newConsumer(50)
defer func() { rStream.Resume() }()
_, _ = p.Register(
- rStream.ctx,
roachpb.RSpan{Key: roachpb.RKey("a"), EndKey: roachpb.RKey("m")},
hlc.Timestamp{WallTime: 1},
nil, /* catchUpIter */
@@ -1190,11 +1163,11 @@ func TestBudgetReleaseOnLastStreamError(t *testing.T) {
// objects. Ideally it would be nice to have
const channelCapacity = totalEvents + 5
- testutils.RunValues(t, "feed type", testTypes, func(t *testing.T, rt rangefeedTestType) {
+ testutils.RunValues(t, "proc type", testTypes, func(t *testing.T, pt procType) {
fb := newTestBudget(math.MaxInt64)
p, h, stopper := newTestProcessor(t, withBudget(fb), withChanCap(channelCapacity),
- withEventTimeout(time.Millisecond), withRangefeedTestType(rt))
+ withEventTimeout(time.Millisecond), withProcType(pt))
ctx := context.Background()
defer stopper.Stop(ctx)
@@ -1202,7 +1175,6 @@ func TestBudgetReleaseOnLastStreamError(t *testing.T) {
rStream := newConsumer(90)
defer func() { rStream.Resume() }()
_, _ = p.Register(
- rStream.ctx,
roachpb.RSpan{Key: roachpb.RKey("a"), EndKey: roachpb.RKey("m")},
hlc.Timestamp{WallTime: 1},
nil, /* catchUpIter */
@@ -1260,12 +1232,12 @@ func TestBudgetReleaseOnOneStreamError(t *testing.T) {
// as sync events used to flush queues.
const channelCapacity = totalEvents/2 + 10
- testutils.RunValues(t, "feed type", testTypes, func(t *testing.T, rt rangefeedTestType) {
+ testutils.RunValues(t, "proc type", testTypes, func(t *testing.T, pt procType) {
fb := newTestBudget(math.MaxInt64)
p, h, stopper := newTestProcessor(t, withBudget(fb), withChanCap(channelCapacity),
- withEventTimeout(100*time.Millisecond), withRangefeedTestType(rt))
+ withEventTimeout(100*time.Millisecond), withProcType(pt))
ctx := context.Background()
defer stopper.Stop(ctx)
@@ -1273,7 +1245,6 @@ func TestBudgetReleaseOnOneStreamError(t *testing.T) {
r1Stream := newConsumer(50)
defer func() { r1Stream.Resume() }()
_, _ = p.Register(
- r1Stream.ctx,
roachpb.RSpan{Key: roachpb.RKey("a"), EndKey: roachpb.RKey("m")},
hlc.Timestamp{WallTime: 1},
nil, /* catchUpIter */
@@ -1287,7 +1258,6 @@ func TestBudgetReleaseOnOneStreamError(t *testing.T) {
// Non-blocking registration that would consume all events.
r2Stream := newConsumer(0)
p.Register(
- r2Stream.ctx,
roachpb.RSpan{Key: roachpb.RKey("a"), EndKey: roachpb.RKey("m")},
hlc.Timestamp{WallTime: 1},
nil, /* catchUpIter */
@@ -1385,6 +1355,10 @@ func (c *consumer) SendUnbuffered(e *kvpb.RangeFeedEvent) error {
return nil
}
+func (c *consumer) Context() context.Context {
+ return c.ctx
+}
+
func (c *consumer) Cancel() {
c.ctxDone()
}
@@ -1449,69 +1423,67 @@ func TestSizeOfEvent(t *testing.T) {
func TestProcessorBackpressure(t *testing.T) {
defer leaktest.AfterTest(t)()
- testutils.RunValues(t, "feed type", testTypes, func(t *testing.T, rt rangefeedTestType) {
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
- span := roachpb.RSpan{Key: roachpb.RKey("a"), EndKey: roachpb.RKey("z")}
+ span := roachpb.RSpan{Key: roachpb.RKey("a"), EndKey: roachpb.RKey("z")}
- p, h, stopper := newTestProcessor(t, withSpan(span), withBudget(newTestBudget(math.MaxInt64)),
- withChanCap(1), withEventTimeout(0), withRangefeedTestType(rt))
- defer stopper.Stop(ctx)
- defer p.Stop()
-
- // Add a registration.
- stream := newTestStream()
- ok, _ := p.Register(stream.ctx, span, hlc.MinTimestamp, nil, /* catchUpIter */
- false /* withDiff */, false /* withFiltering */, false /* withOmitRemote */, stream, nil)
- require.True(t, ok)
-
- // Wait for the initial checkpoint.
- h.syncEventAndRegistrations()
- require.Len(t, stream.Events(), 1)
-
- // Block the registration consumer, and spawn a goroutine to post events to
- // the stream, which should block. The rangefeed pipeline buffers a few
- // additional events in intermediate goroutines between channels, so post 10
- // events to be sure.
- unblock := stream.BlockSend()
- defer unblock()
-
- const numEvents = 10
- doneC := make(chan struct{})
- go func() {
- for i := 0; i < numEvents; i++ {
- assert.True(t, p.ForwardClosedTS(ctx, hlc.Timestamp{WallTime: int64(i + 1)}))
- }
- close(doneC)
- }()
-
- // The sender should be blocked for at least 3 seconds.
- select {
- case <-doneC:
- t.Fatal("send unexpectely succeeded")
- case <-time.After(3 * time.Second):
- case <-ctx.Done():
+ p, h, stopper := newTestProcessor(t, withSpan(span), withBudget(newTestBudget(math.MaxInt64)),
+ withChanCap(1), withEventTimeout(0), withProcType(legacyProcessor))
+ defer stopper.Stop(ctx)
+ defer p.Stop()
+
+ // Add a registration.
+ stream := newTestStream()
+ ok, _ := p.Register(span, hlc.MinTimestamp, nil, /* catchUpIter */
+ false /* withDiff */, false /* withFiltering */, false /* withOmitRemote */, stream, nil)
+ require.True(t, ok)
+
+ // Wait for the initial checkpoint.
+ h.syncEventAndRegistrations()
+ require.Len(t, stream.Events(), 1)
+
+ // Block the registration consumer, and spawn a goroutine to post events to
+ // the stream, which should block. The rangefeed pipeline buffers a few
+ // additional events in intermediate goroutines between channels, so post 10
+ // events to be sure.
+ unblock := stream.BlockSend()
+ defer unblock()
+
+ const numEvents = 10
+ doneC := make(chan struct{})
+ go func() {
+ for i := 0; i < numEvents; i++ {
+ assert.True(t, p.ForwardClosedTS(ctx, hlc.Timestamp{WallTime: int64(i + 1)}))
}
+ close(doneC)
+ }()
- // Unblock the sender, and wait for it to complete.
- unblock()
- select {
- case <-doneC:
- case <-time.After(time.Second):
- t.Fatal("sender did not complete")
- }
+ // The sender should be blocked for at least 3 seconds.
+ select {
+ case <-doneC:
+		t.Fatal("send unexpectedly succeeded")
+ case <-time.After(3 * time.Second):
+ case <-ctx.Done():
+ }
- // Wait for the final checkpoint event.
- h.syncEventAndRegistrations()
- events := stream.Events()
- require.Equal(t, &kvpb.RangeFeedEvent{
- Checkpoint: &kvpb.RangeFeedCheckpoint{
- Span: span.AsRawSpanWithNoLocals(),
- ResolvedTS: hlc.Timestamp{WallTime: numEvents},
- },
- }, events[len(events)-1])
- })
+ // Unblock the sender, and wait for it to complete.
+ unblock()
+ select {
+ case <-doneC:
+ case <-time.After(time.Second):
+ t.Fatal("sender did not complete")
+ }
+
+ // Wait for the final checkpoint event.
+ h.syncEventAndRegistrations()
+ events := stream.Events()
+ require.Equal(t, &kvpb.RangeFeedEvent{
+ Checkpoint: &kvpb.RangeFeedCheckpoint{
+ Span: span.AsRawSpanWithNoLocals(),
+ ResolvedTS: hlc.Timestamp{WallTime: numEvents},
+ },
+ }, events[len(events)-1])
}
// TestProcessorContextCancellation tests that the processor cancels the
diff --git a/pkg/kv/kvserver/rangefeed/registry.go b/pkg/kv/kvserver/rangefeed/registry.go
index 60d0047ab7b8..f66f151787e0 100644
--- a/pkg/kv/kvserver/rangefeed/registry.go
+++ b/pkg/kv/kvserver/rangefeed/registry.go
@@ -62,7 +62,6 @@ type registration interface {
// baseRegistration is a common base for all registration types. It is intended
// to be embedded in an actual registration struct.
type baseRegistration struct {
- streamCtx context.Context
span roachpb.Span
withDiff bool
withFiltering bool
diff --git a/pkg/kv/kvserver/rangefeed/registry_helper_test.go b/pkg/kv/kvserver/rangefeed/registry_helper_test.go
deleted file mode 100644
index 8421cfc96367..000000000000
--- a/pkg/kv/kvserver/rangefeed/registry_helper_test.go
+++ /dev/null
@@ -1,276 +0,0 @@
-// Copyright 2024 The Cockroach Authors.
-//
-// Use of this software is governed by the CockroachDB Software License
-// included in the /LICENSE file.
-
-package rangefeed
-
-import (
- "context"
- "sync"
- "testing"
- "time"
-
- _ "github.com/cockroachdb/cockroach/pkg/keys" // hook up pretty printer
- "github.com/cockroachdb/cockroach/pkg/kv/kvpb"
- "github.com/cockroachdb/cockroach/pkg/roachpb"
- "github.com/cockroachdb/cockroach/pkg/storage"
- "github.com/cockroachdb/cockroach/pkg/storage/enginepb"
- "github.com/cockroachdb/cockroach/pkg/testutils"
- "github.com/cockroachdb/cockroach/pkg/util/hlc"
- "github.com/cockroachdb/cockroach/pkg/util/syncutil"
- "github.com/cockroachdb/cockroach/pkg/util/uuid"
-)
-
-var (
- keyA, keyB = roachpb.Key("a"), roachpb.Key("b")
- keyC, keyD = roachpb.Key("c"), roachpb.Key("d")
- keyX, keyY = roachpb.Key("x"), roachpb.Key("y")
-
- spAB = roachpb.Span{Key: keyA, EndKey: keyB}
- spBC = roachpb.Span{Key: keyB, EndKey: keyC}
- spCD = roachpb.Span{Key: keyC, EndKey: keyD}
- spAC = roachpb.Span{Key: keyA, EndKey: keyC}
- spXY = roachpb.Span{Key: keyX, EndKey: keyY}
-)
-
-var txn1, txn2 = uuid.MakeV4(), uuid.MakeV4()
-
-var keyValues = []storage.MVCCKeyValue{
- makeKV("a", "valA1", 10),
- makeIntent("c", txn1, "txnKeyC", 15),
- makeProvisionalKV("c", "txnKeyC", 15),
- makeKV("c", "valC2", 11),
- makeKV("c", "valC1", 9),
- makeIntent("d", txn2, "txnKeyD", 21),
- makeProvisionalKV("d", "txnKeyD", 21),
- makeKV("d", "valD5", 20),
- makeKV("d", "valD4", 19),
- makeKV("d", "valD3", 16),
- makeKV("d", "valD2", 3),
- makeKV("d", "valD1", 1),
- makeKV("e", "valE3", 6),
- makeKV("e", "valE2", 5),
- makeKV("e", "valE1", 4),
- makeKV("f", "valF3", 7),
- makeKV("f", "valF2", 6),
- makeKV("f", "valF1", 5),
- makeKV("h", "valH1", 15),
- makeKV("m", "valM1", 1),
- makeIntent("n", txn1, "txnKeyN", 12),
- makeProvisionalKV("n", "txnKeyN", 12),
- makeIntent("r", txn1, "txnKeyR", 19),
- makeProvisionalKV("r", "txnKeyR", 19),
- makeKV("r", "valR1", 4),
- makeKV("s", "valS3", 21),
- makeKVWithHeader("s", "valS2", 20, enginepb.MVCCValueHeader{OmitInRangefeeds: true}),
- makeKV("s", "valS1", 19),
- makeIntent("w", txn1, "txnKeyW", 3),
- makeProvisionalKV("w", "txnKeyW", 3),
- makeIntent("z", txn2, "txnKeyZ", 21),
- makeProvisionalKV("z", "txnKeyZ", 21),
- makeKV("z", "valZ1", 4),
-}
-
-func expEvents(filtering bool) []*kvpb.RangeFeedEvent {
- expEvents := []*kvpb.RangeFeedEvent{
- rangeFeedValueWithPrev(
- roachpb.Key("d"),
- makeValWithTs("valD3", 16),
- makeVal("valD2"),
- ),
- rangeFeedValueWithPrev(
- roachpb.Key("d"),
- makeValWithTs("valD4", 19),
- makeVal("valD3"),
- ),
- rangeFeedValueWithPrev(
- roachpb.Key("d"),
- makeValWithTs("valD5", 20),
- makeVal("valD4"),
- ),
- rangeFeedValueWithPrev(
- roachpb.Key("e"),
- makeValWithTs("valE2", 5),
- makeVal("valE1"),
- ),
- rangeFeedValueWithPrev(
- roachpb.Key("e"),
- makeValWithTs("valE3", 6),
- makeVal("valE2"),
- ),
- rangeFeedValue(
- roachpb.Key("f"),
- makeValWithTs("valF1", 5),
- ),
- rangeFeedValueWithPrev(
- roachpb.Key("f"),
- makeValWithTs("valF2", 6),
- makeVal("valF1"),
- ),
- rangeFeedValueWithPrev(
- roachpb.Key("f"),
- makeValWithTs("valF3", 7),
- makeVal("valF2"),
- ),
- rangeFeedValue(
- roachpb.Key("h"),
- makeValWithTs("valH1", 15),
- ),
- rangeFeedValue(
- roachpb.Key("s"),
- makeValWithTs("valS1", 19),
- ),
- }
- if !filtering {
- expEvents = append(expEvents,
- rangeFeedValueWithPrev(
- roachpb.Key("s"),
- makeValWithTs("valS2", 20),
- makeVal("valS1"),
- ))
- }
- expEvents = append(expEvents, rangeFeedValueWithPrev(
- roachpb.Key("s"),
- makeValWithTs("valS3", 21),
- // Even though the event that wrote val2 is filtered out, we want to keep
- // val2 as a previous value of the next event.
- makeVal("valS2"),
- ))
- return expEvents
-}
-
-type testStream struct {
- ctx context.Context
- ctxDone func()
- done chan *kvpb.Error
- mu struct {
- syncutil.Mutex
- sendErr error
- events []*kvpb.RangeFeedEvent
- }
-}
-
-func newTestStream() *testStream {
- ctx, done := context.WithCancel(context.Background())
- return &testStream{ctx: ctx, ctxDone: done, done: make(chan *kvpb.Error, 1)}
-}
-
-func (s *testStream) Context() context.Context {
- return s.ctx
-}
-
-func (s *testStream) Cancel() {
- s.ctxDone()
-}
-
-func (s *testStream) SendUnbufferedIsThreadSafe() {}
-
-func (s *testStream) SendUnbuffered(e *kvpb.RangeFeedEvent) error {
- s.mu.Lock()
- defer s.mu.Unlock()
- if s.mu.sendErr != nil {
- return s.mu.sendErr
- }
- s.mu.events = append(s.mu.events, e)
- return nil
-}
-
-func (s *testStream) SetSendErr(err error) {
- s.mu.Lock()
- defer s.mu.Unlock()
- s.mu.sendErr = err
-}
-
-func (s *testStream) Events() []*kvpb.RangeFeedEvent {
- s.mu.Lock()
- defer s.mu.Unlock()
- es := s.mu.events
- s.mu.events = nil
- return es
-}
-
-func (s *testStream) BlockSend() func() {
- s.mu.Lock()
- var once sync.Once
- return func() {
- once.Do(s.mu.Unlock) // safe to call multiple times, e.g. defer and explicit
- }
-}
-
-// Disconnect implements the Stream interface. It mocks the disconnect behavior
-// by sending the error to the done channel.
-func (s *testStream) Disconnect(err *kvpb.Error) {
- s.done <- err
-}
-
-// Error returns the error that was sent to the done channel. It returns nil if
-// no error was sent yet.
-func (s *testStream) Error() error {
- select {
- case err := <-s.done:
- return err.GoError()
- default:
- return nil
- }
-}
-
-// WaitForError waits for the rangefeed to complete and returns the error sent
-// to the done channel. It fails the test if rangefeed cannot complete within 30
-// seconds.
-func (s *testStream) WaitForError(t *testing.T) error {
- select {
- case err := <-s.done:
- return err.GoError()
- case <-time.After(testutils.DefaultSucceedsSoonDuration):
- t.Fatalf("time out waiting for rangefeed completion")
- return nil
- }
-}
-
-type testRegistration struct {
- *bufferedRegistration
- *testStream
-}
-
-func makeCatchUpIterator(
- iter storage.SimpleMVCCIterator, span roachpb.Span, startTime hlc.Timestamp,
-) *CatchUpIterator {
- if iter == nil {
- return nil
- }
- return &CatchUpIterator{
- simpleCatchupIter: simpleCatchupIterAdapter{iter},
- span: span,
- startTime: startTime,
- }
-}
-
-func newTestRegistration(
- span roachpb.Span,
- ts hlc.Timestamp,
- catchup storage.SimpleMVCCIterator,
- withDiff bool,
- withFiltering bool,
- withOmitRemote bool,
-) *testRegistration {
- s := newTestStream()
- r := newBufferedRegistration(
- s.ctx,
- span,
- ts,
- makeCatchUpIterator(catchup, span, ts),
- withDiff,
- withFiltering,
- withOmitRemote,
- 5,
- false, /* blockWhenFull */
- NewMetrics(),
- s,
- func() {},
- )
- return &testRegistration{
- bufferedRegistration: r,
- testStream: s,
- }
-}
diff --git a/pkg/kv/kvserver/rangefeed/registry_test.go b/pkg/kv/kvserver/rangefeed/registry_test.go
index 7093937ada54..49c0e0a5d0d7 100644
--- a/pkg/kv/kvserver/rangefeed/registry_test.go
+++ b/pkg/kv/kvserver/rangefeed/registry_test.go
@@ -8,18 +8,169 @@ package rangefeed
import (
"context"
"fmt"
+ "sync"
"testing"
+ "time"
_ "github.com/cockroachdb/cockroach/pkg/keys" // hook up pretty printer
"github.com/cockroachdb/cockroach/pkg/kv/kvpb"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/storage"
+ "github.com/cockroachdb/cockroach/pkg/storage/enginepb"
"github.com/cockroachdb/cockroach/pkg/testutils"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
+ "github.com/cockroachdb/cockroach/pkg/util/syncutil"
+ "github.com/cockroachdb/cockroach/pkg/util/uuid"
"github.com/stretchr/testify/require"
)
+var (
+ keyA, keyB = roachpb.Key("a"), roachpb.Key("b")
+ keyC, keyD = roachpb.Key("c"), roachpb.Key("d")
+ keyX, keyY = roachpb.Key("x"), roachpb.Key("y")
+
+ spAB = roachpb.Span{Key: keyA, EndKey: keyB}
+ spBC = roachpb.Span{Key: keyB, EndKey: keyC}
+ spCD = roachpb.Span{Key: keyC, EndKey: keyD}
+ spAC = roachpb.Span{Key: keyA, EndKey: keyC}
+ spXY = roachpb.Span{Key: keyX, EndKey: keyY}
+)
+
+type testStream struct {
+ ctx context.Context
+ ctxDone func()
+ done chan *kvpb.Error
+ mu struct {
+ syncutil.Mutex
+ sendErr error
+ events []*kvpb.RangeFeedEvent
+ }
+}
+
+func newTestStream() *testStream {
+ ctx, done := context.WithCancel(context.Background())
+ return &testStream{ctx: ctx, ctxDone: done, done: make(chan *kvpb.Error, 1)}
+}
+
+func (s *testStream) Context() context.Context {
+ return s.ctx
+}
+
+func (s *testStream) Cancel() {
+ s.ctxDone()
+}
+
+func (s *testStream) SendUnbufferedIsThreadSafe() {}
+
+func (s *testStream) SendUnbuffered(e *kvpb.RangeFeedEvent) error {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if s.mu.sendErr != nil {
+ return s.mu.sendErr
+ }
+ s.mu.events = append(s.mu.events, e)
+ return nil
+}
+
+func (s *testStream) SetSendErr(err error) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ s.mu.sendErr = err
+}
+
+func (s *testStream) Events() []*kvpb.RangeFeedEvent {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ es := s.mu.events
+ s.mu.events = nil
+ return es
+}
+
+func (s *testStream) BlockSend() func() {
+ s.mu.Lock()
+ var once sync.Once
+ return func() {
+ once.Do(s.mu.Unlock) // safe to call multiple times, e.g. defer and explicit
+ }
+}
+
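BlockSend returns with the stream mutex still held; the returned closure releases it at most once, so a test can both defer the unblock and call it explicitly. A standalone sketch of the idiom:

package main

import (
	"fmt"
	"sync"
)

type stream struct{ mu sync.Mutex }

// blockSend holds mu until the returned func runs; sync.Once makes the release
// idempotent, so `defer unblock()` plus an explicit unblock() is safe.
func (s *stream) blockSend() func() {
	s.mu.Lock()
	var once sync.Once
	return func() { once.Do(s.mu.Unlock) }
}

func (s *stream) send(msg string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	fmt.Println("sent", msg)
}

func main() {
	s := &stream{}
	unblock := s.blockSend()
	defer unblock() // harmless: the Once has already fired by then

	done := make(chan struct{})
	go func() { s.send("hello"); close(done) }()

	unblock() // explicit release lets the blocked sender proceed
	<-done
}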
+// Disconnect implements the Stream interface. It mocks the disconnect behavior
+// by sending the error to the done channel.
+func (s *testStream) Disconnect(err *kvpb.Error) {
+ s.done <- err
+}
+
+// Error returns the error that was sent to the done channel. It returns nil if
+// no error was sent yet.
+func (s *testStream) Error() error {
+ select {
+ case err := <-s.done:
+ return err.GoError()
+ default:
+ return nil
+ }
+}
+
+// WaitForError waits for the rangefeed to complete and returns the error sent
+// to the done channel. It fails the test if the rangefeed cannot complete
+// within the default SucceedsSoon timeout.
+func (s *testStream) WaitForError(t *testing.T) error {
+ select {
+ case err := <-s.done:
+ return err.GoError()
+ case <-time.After(testutils.DefaultSucceedsSoonDuration):
+		t.Fatalf("timed out waiting for rangefeed completion")
+ return nil
+ }
+}
+
+type testRegistration struct {
+ *bufferedRegistration
+ *testStream
+}
+
+func makeCatchUpIterator(
+ iter storage.SimpleMVCCIterator, span roachpb.Span, startTime hlc.Timestamp,
+) *CatchUpIterator {
+ if iter == nil {
+ return nil
+ }
+ return &CatchUpIterator{
+ simpleCatchupIter: simpleCatchupIterAdapter{iter},
+ span: span,
+ startTime: startTime,
+ }
+}
+
+func newTestRegistration(
+ span roachpb.Span,
+ ts hlc.Timestamp,
+ catchup storage.SimpleMVCCIterator,
+ withDiff bool,
+ withFiltering bool,
+ withOmitRemote bool,
+) *testRegistration {
+ s := newTestStream()
+ r := newBufferedRegistration(
+ span,
+ ts,
+ makeCatchUpIterator(catchup, span, ts),
+ withDiff,
+ withFiltering,
+ withOmitRemote,
+ 5,
+ false, /* blockWhenFull */
+ NewMetrics(),
+ s,
+ func() {},
+ )
+ return &testRegistration{
+ bufferedRegistration: r,
+ testStream: s,
+ }
+}
+
func TestRegistrationBasic(t *testing.T) {
defer leaktest.AfterTest(t)()
ctx := context.Background()
@@ -105,9 +256,9 @@ func TestRegistrationBasic(t *testing.T) {
false /* withDiff */, false /* withFiltering */, false /* withOmitRemote */)
streamCancelReg.Cancel()
- go streamCancelReg.runOutputLoop(streamCancelReg.ctx, 0)
+ go streamCancelReg.runOutputLoop(ctx, 0)
require.NoError(t, streamCancelReg.waitForCaughtUp(ctx))
- require.Equal(t, streamCancelReg.ctx.Err(), streamCancelReg.WaitForError(t))
+ require.Equal(t, streamCancelReg.stream.Context().Err(), streamCancelReg.WaitForError(t))
}
func TestRegistrationCatchUpScan(t *testing.T) {
@@ -116,7 +267,43 @@ func TestRegistrationCatchUpScan(t *testing.T) {
testutils.RunTrueAndFalse(t, "withFiltering", func(t *testing.T, withFiltering bool) {
// Run a catch-up scan for a registration over a test
// iterator with the following keys.
- iter := newTestIterator(keyValues, roachpb.Key("w"))
+ txn1, txn2 := uuid.MakeV4(), uuid.MakeV4()
+ iter := newTestIterator([]storage.MVCCKeyValue{
+ makeKV("a", "valA1", 10),
+ makeIntent("c", txn1, "txnKeyC", 15),
+ makeProvisionalKV("c", "txnKeyC", 15),
+ makeKV("c", "valC2", 11),
+ makeKV("c", "valC1", 9),
+ makeIntent("d", txn2, "txnKeyD", 21),
+ makeProvisionalKV("d", "txnKeyD", 21),
+ makeKV("d", "valD5", 20),
+ makeKV("d", "valD4", 19),
+ makeKV("d", "valD3", 16),
+ makeKV("d", "valD2", 3),
+ makeKV("d", "valD1", 1),
+ makeKV("e", "valE3", 6),
+ makeKV("e", "valE2", 5),
+ makeKV("e", "valE1", 4),
+ makeKV("f", "valF3", 7),
+ makeKV("f", "valF2", 6),
+ makeKV("f", "valF1", 5),
+ makeKV("h", "valH1", 15),
+ makeKV("m", "valM1", 1),
+ makeIntent("n", txn1, "txnKeyN", 12),
+ makeProvisionalKV("n", "txnKeyN", 12),
+ makeIntent("r", txn1, "txnKeyR", 19),
+ makeProvisionalKV("r", "txnKeyR", 19),
+ makeKV("r", "valR1", 4),
+ makeKV("s", "valS3", 21),
+ makeKVWithHeader("s", "valS2", 20, enginepb.MVCCValueHeader{OmitInRangefeeds: true}),
+ makeKV("s", "valS1", 19),
+ makeIntent("w", txn1, "txnKeyW", 3),
+ makeProvisionalKV("w", "txnKeyW", 3),
+ makeIntent("z", txn2, "txnKeyZ", 21),
+ makeProvisionalKV("z", "txnKeyZ", 21),
+ makeKV("z", "valZ1", 4),
+ }, roachpb.Key("w"))
+
r := newTestRegistration(roachpb.Span{
Key: roachpb.Key("d"),
EndKey: roachpb.Key("w"),
@@ -128,7 +315,71 @@ func TestRegistrationCatchUpScan(t *testing.T) {
require.NotZero(t, r.metrics.RangeFeedCatchUpScanNanos.Count())
// Compare the events sent on the registration's Stream to the expected events.
- require.Equal(t, expEvents(withFiltering), r.Events())
+ expEvents := []*kvpb.RangeFeedEvent{
+ rangeFeedValueWithPrev(
+ roachpb.Key("d"),
+ makeValWithTs("valD3", 16),
+ makeVal("valD2"),
+ ),
+ rangeFeedValueWithPrev(
+ roachpb.Key("d"),
+ makeValWithTs("valD4", 19),
+ makeVal("valD3"),
+ ),
+ rangeFeedValueWithPrev(
+ roachpb.Key("d"),
+ makeValWithTs("valD5", 20),
+ makeVal("valD4"),
+ ),
+ rangeFeedValueWithPrev(
+ roachpb.Key("e"),
+ makeValWithTs("valE2", 5),
+ makeVal("valE1"),
+ ),
+ rangeFeedValueWithPrev(
+ roachpb.Key("e"),
+ makeValWithTs("valE3", 6),
+ makeVal("valE2"),
+ ),
+ rangeFeedValue(
+ roachpb.Key("f"),
+ makeValWithTs("valF1", 5),
+ ),
+ rangeFeedValueWithPrev(
+ roachpb.Key("f"),
+ makeValWithTs("valF2", 6),
+ makeVal("valF1"),
+ ),
+ rangeFeedValueWithPrev(
+ roachpb.Key("f"),
+ makeValWithTs("valF3", 7),
+ makeVal("valF2"),
+ ),
+ rangeFeedValue(
+ roachpb.Key("h"),
+ makeValWithTs("valH1", 15),
+ ),
+ rangeFeedValue(
+ roachpb.Key("s"),
+ makeValWithTs("valS1", 19),
+ ),
+ }
+ if !withFiltering {
+ expEvents = append(expEvents,
+ rangeFeedValueWithPrev(
+ roachpb.Key("s"),
+ makeValWithTs("valS2", 20),
+ makeVal("valS1"),
+ ))
+ }
+ expEvents = append(expEvents, rangeFeedValueWithPrev(
+ roachpb.Key("s"),
+ makeValWithTs("valS3", 21),
+		// Even though the event that wrote valS2 is filtered out, we want to
+		// keep valS2 as the previous value of the next event.
+ makeVal("valS2"),
+ ))
+ require.Equal(t, expEvents, r.Events())
})
}
diff --git a/pkg/kv/kvserver/rangefeed/scheduled_processor.go b/pkg/kv/kvserver/rangefeed/scheduled_processor.go
index 6bca9a20f38c..3f615892fbaa 100644
--- a/pkg/kv/kvserver/rangefeed/scheduled_processor.go
+++ b/pkg/kv/kvserver/rangefeed/scheduled_processor.go
@@ -296,7 +296,6 @@ func (p *ScheduledProcessor) sendStop(pErr *kvpb.Error) {
//
// NB: startTS is exclusive; the first possible event will be at startTS.Next().
func (p *ScheduledProcessor) Register(
- streamCtx context.Context,
span roachpb.RSpan,
startTS hlc.Timestamp,
catchUpIter *CatchUpIterator,
@@ -318,7 +317,6 @@ func (p *ScheduledProcessor) Register(
"unimplemented: unbuffered registrations for rangefeed, see #126560")
} else {
r = newBufferedRegistration(
- streamCtx,
span.AsRawSpanWithNoLocals(), startTS, catchUpIter, withDiff, withFiltering, withOmitRemote,
p.Config.EventChanCap, blockWhenFull, p.Metrics, stream, disconnectFn,
)
diff --git a/pkg/kv/kvserver/rangefeed/stream.go b/pkg/kv/kvserver/rangefeed/stream.go
index 0238a049d457..b4af811ab0e5 100644
--- a/pkg/kv/kvserver/rangefeed/stream.go
+++ b/pkg/kv/kvserver/rangefeed/stream.go
@@ -6,6 +6,8 @@
package rangefeed
import (
+ "context"
+
"github.com/cockroachdb/cockroach/pkg/kv/kvpb"
"github.com/cockroachdb/cockroach/pkg/roachpb"
)
@@ -24,15 +26,17 @@ type Stream interface {
// PerRangeEventSink is an implementation of Stream which annotates each
// response with rangeID and streamID. It is used by MuxRangeFeed.
type PerRangeEventSink struct {
+ ctx context.Context
rangeID roachpb.RangeID
streamID int64
wrapped *UnbufferedSender
}
func NewPerRangeEventSink(
- rangeID roachpb.RangeID, streamID int64, wrapped *UnbufferedSender,
+ ctx context.Context, rangeID roachpb.RangeID, streamID int64, wrapped *UnbufferedSender,
) *PerRangeEventSink {
return &PerRangeEventSink{
+ ctx: ctx,
rangeID: rangeID,
streamID: streamID,
wrapped: wrapped,
@@ -42,6 +46,10 @@ func NewPerRangeEventSink(
var _ kvpb.RangeFeedEventSink = (*PerRangeEventSink)(nil)
var _ Stream = (*PerRangeEventSink)(nil)
+func (s *PerRangeEventSink) Context() context.Context {
+ return s.ctx
+}
+
// SendUnbufferedIsThreadSafe is a no-op declaration method. It is a contract
// that the SendUnbuffered method is thread-safe. Note that
// UnbufferedSender.SendUnbuffered is thread-safe.
diff --git a/pkg/kv/kvserver/rangefeed/task_test.go b/pkg/kv/kvserver/rangefeed/task_test.go
index 80fa3759fe99..403bd45279bb 100644
--- a/pkg/kv/kvserver/rangefeed/task_test.go
+++ b/pkg/kv/kvserver/rangefeed/task_test.go
@@ -334,9 +334,7 @@ func TestInitResolvedTSScan(t *testing.T) {
defer engine.Close()
 // Mock processor. We just need its eventC.
- s := newTestScheduler(1)
- p := ScheduledProcessor{
- scheduler: s.NewClientScheduler(),
+ p := LegacyProcessor{
Config: Config{
Span: span,
},
@@ -479,11 +477,7 @@ func TestTxnPushAttempt(t *testing.T) {
// Mock processor. We configure its key span to exclude one of txn2's lock
// spans and a portion of three of txn4's lock spans.
- s := newTestScheduler(1)
- p := ScheduledProcessor{
- scheduler: s.NewClientScheduler(),
- eventC: make(chan *event, 100),
- }
+ p := LegacyProcessor{eventC: make(chan *event, 100)}
p.Span = roachpb.RSpan{Key: roachpb.RKey("b"), EndKey: roachpb.RKey("m")}
p.TxnPusher = &tp
diff --git a/pkg/kv/kvserver/rangefeed/testutil.go b/pkg/kv/kvserver/rangefeed/testutil.go
new file mode 100644
index 000000000000..eb4a4f3a762f
--- /dev/null
+++ b/pkg/kv/kvserver/rangefeed/testutil.go
@@ -0,0 +1,15 @@
+// Copyright 2023 The Cockroach Authors.
+//
+// Use of this software is governed by the CockroachDB Software License
+// included in the /LICENSE file.
+
+package rangefeed
+
+func NewTestProcessor(id int64) Processor {
+ if id > 0 {
+ return &ScheduledProcessor{
+ scheduler: ClientScheduler{id: id},
+ }
+ }
+ return &LegacyProcessor{}
+}
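A hedged usage sketch for the helper above, written as if it sat in the same package: a positive id yields a scheduler-backed processor, anything else the legacy one. The test below is illustrative, not an existing test:

package rangefeed

import "testing"

// TestNewTestProcessorKinds sketches how the helper dispatches on id.
func TestNewTestProcessorKinds(t *testing.T) {
	if _, ok := NewTestProcessor(7).(*ScheduledProcessor); !ok {
		t.Fatal("positive id should yield a scheduler-backed processor")
	}
	if _, ok := NewTestProcessor(0).(*LegacyProcessor); !ok {
		t.Fatal("id 0 should yield the legacy processor")
	}
}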
diff --git a/pkg/kv/kvserver/replica.go b/pkg/kv/kvserver/replica.go
index 29bbd9995da1..0b2fbc22408d 100644
--- a/pkg/kv/kvserver/replica.go
+++ b/pkg/kv/kvserver/replica.go
@@ -811,10 +811,10 @@ type Replica struct {
quotaReleaseQueue []*quotapool.IntAlloc
// Counts calls to Replica.tick()
- ticks int64
+ ticks int
// lastProposalAtTicks tracks the time of the last proposal, in ticks.
- lastProposalAtTicks int64
+ lastProposalAtTicks int
// Counts Raft messages refused due to queue congestion.
droppedMessages int
diff --git a/pkg/kv/kvserver/replica_proposal.go b/pkg/kv/kvserver/replica_proposal.go
index f82e573c800f..c0261d33b8eb 100644
--- a/pkg/kv/kvserver/replica_proposal.go
+++ b/pkg/kv/kvserver/replica_proposal.go
@@ -138,11 +138,11 @@ type ProposalData struct {
// proposedAtTicks is the (logical) time at which this command was
// last (re-)proposed.
- proposedAtTicks int64
+ proposedAtTicks int
// createdAtTicks is the (logical) time at which this command was
// *first* proposed.
- createdAtTicks int64
+ createdAtTicks int
// command is the log entry that is encoded into encodedCommand and proposed
// to raft. Never mutated.
diff --git a/pkg/kv/kvserver/replica_raft.go b/pkg/kv/kvserver/replica_raft.go
index 4128e5686e1e..8407ba04157d 100644
--- a/pkg/kv/kvserver/replica_raft.go
+++ b/pkg/kv/kvserver/replica_raft.go
@@ -612,7 +612,7 @@ func (r *Replica) hasSendTokensRaftMuLockedReplicaMuLocked() bool {
// ticksSinceLastProposalRLocked returns the number of ticks since the last
// proposal.
-func (r *Replica) ticksSinceLastProposalRLocked() int64 {
+func (r *Replica) ticksSinceLastProposalRLocked() int {
return r.mu.ticks - r.mu.lastProposalAtTicks
}
@@ -679,6 +679,20 @@ func (r *Replica) stepRaftGroupRaftMuLocked(req *kvserverpb.RaftMessageRequest)
// If we receive a (pre)vote request, and we find our leader to be dead or
// removed, forget it so we can grant the (pre)votes.
r.maybeForgetLeaderOnVoteRequestLocked()
+ case raftpb.MsgSnap:
+ // Occasionally a snapshot message may arrive under an outdated term,
+ // which would lead to Raft discarding the snapshot. This should be
+ // really rare in practice, but it does happen in tests and in particular
+ // can happen to the synchronous snapshots on the learner path, which
+ // will then have to wait for the raft snapshot queue to send another
+ // snapshot. However, in some tests it is desirable to disable the
+ // raft snapshot queue. This workaround makes that possible.
+ //
+ // See TestReportUnreachableRemoveRace for the test that prompted
+ // this addition.
+ if term := raftGroup.BasicStatus().Term; term > req.Message.Term {
+ req.Message.Term = term
+ }
case raftpb.MsgApp:
if n := len(req.Message.Entries); n > 0 {
sideChannelInfo = replica_rac2.SideChannelInfoUsingRaftMessageRequest{
@@ -1611,7 +1625,7 @@ const (
// ticks of an election timeout (affect only proposals that have had ample time
// to apply but didn't).
func (r *Replica) refreshProposalsLocked(
- ctx context.Context, refreshAtDelta int64, reason refreshRaftReason,
+ ctx context.Context, refreshAtDelta int, reason refreshRaftReason,
) {
if refreshAtDelta != 0 && reason != reasonTicks {
log.Fatalf(ctx, "refreshAtDelta specified for reason %s != reasonTicks", reason)
diff --git a/pkg/kv/kvserver/replica_raft_quiesce.go b/pkg/kv/kvserver/replica_raft_quiesce.go
index 1dc56f138bd5..7f825362ea14 100644
--- a/pkg/kv/kvserver/replica_raft_quiesce.go
+++ b/pkg/kv/kvserver/replica_raft_quiesce.go
@@ -27,7 +27,7 @@ import (
 // should quiesce. Unquiescing incurs a raft proposal which has a non-negligible
// cost, and low-latency clusters may otherwise (un)quiesce very frequently,
// e.g. on every tick.
-var quiesceAfterTicks = envutil.EnvOrDefaultInt64("COCKROACH_QUIESCE_AFTER_TICKS", 6)
+var quiesceAfterTicks = envutil.EnvOrDefaultInt("COCKROACH_QUIESCE_AFTER_TICKS", 6)
// raftDisableQuiescence disables raft quiescence.
var raftDisableQuiescence = envutil.EnvOrDefaultBool("COCKROACH_DISABLE_QUIESCENCE", false)
@@ -213,7 +213,7 @@ type quiescer interface {
hasPendingProposalsRLocked() bool
hasPendingProposalQuotaRLocked() bool
hasSendTokensRaftMuLockedReplicaMuLocked() bool
- ticksSinceLastProposalRLocked() int64
+ ticksSinceLastProposalRLocked() int
mergeInProgressRLocked() bool
isDestroyedRLocked() (DestroyReason, error)
}
diff --git a/pkg/kv/kvserver/replica_range_lease.go b/pkg/kv/kvserver/replica_range_lease.go
index 3e0e2f1b9b07..258315c38752 100644
--- a/pkg/kv/kvserver/replica_range_lease.go
+++ b/pkg/kv/kvserver/replica_range_lease.go
@@ -1268,12 +1268,25 @@ func (r *Replica) redirectOnOrAcquireLeaseForRequest(
msg = "lease state could not be determined"
}
log.VEventf(ctx, 2, "%s", msg)
- // TODO(nvanbenschoten): now that leader leases are going to return an
- // ERROR status on follower replicas instead of a VALID status, we will
- // hit this path more. Do we need to add the lease to this
- // NotLeaseHolder error to ensure fast redirection?
+ // If the lease state could not be determined as valid or invalid, then
+ // we return an error to redirect the request to the replica pointed to
+ // by the lease record. We don't know for sure who the leaseholder is,
+ // but that replica is still the best bet.
+ //
+	// However, we only do this if the lease is not owned by the local store,
+	// which is currently struggling to evaluate the validity of the lease.
+ // This avoids self-redirection, which might prevent the client from
+ // trying other replicas.
+ //
+ // TODO(nvanbenschoten): this self-redirection case only happens with
+ // epoch-based leases, so we can remove this logic when we remove that
+ // lease type.
+ var holder roachpb.Lease
+ if !status.Lease.OwnedBy(r.store.StoreID()) {
+ holder = status.Lease
+ }
return nil, kvserverpb.LeaseStatus{}, false, kvpb.NewError(
- kvpb.NewNotLeaseHolderError(roachpb.Lease{}, r.store.StoreID(), r.shMu.state.Desc, msg))
+ kvpb.NewNotLeaseHolderError(holder, r.store.StoreID(), r.shMu.state.Desc, msg))
case kvserverpb.LeaseState_VALID, kvserverpb.LeaseState_UNUSABLE:
if !status.Lease.OwnedBy(r.store.StoreID()) {
diff --git a/pkg/kv/kvserver/replica_rangefeed.go b/pkg/kv/kvserver/replica_rangefeed.go
index 646492ee6a3e..c9bddd3495d4 100644
--- a/pkg/kv/kvserver/replica_rangefeed.go
+++ b/pkg/kv/kvserver/replica_rangefeed.go
@@ -31,6 +31,7 @@ import (
"github.com/cockroachdb/cockroach/pkg/util/errorutil/unimplemented"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/log"
+ "github.com/cockroachdb/cockroach/pkg/util/metamorphic"
"github.com/cockroachdb/cockroach/pkg/util/syncutil/singleflight"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
"github.com/cockroachdb/cockroach/pkg/util/uuid"
@@ -81,10 +82,14 @@ var RangeFeedUseScheduler = settings.RegisterBoolSetting(
"use shared fixed pool of workers for all range feeds instead of a "+
"worker per range (worker pool size is determined by "+
"COCKROACH_RANGEFEED_SCHEDULER_WORKERS env variable)",
- true,
- settings.Retired,
+ metamorphic.ConstantWithTestBool("kv_rangefeed_scheduler_enabled", true),
)
+// RangefeedSchedulerDisabled is a kill switch for scheduler-based rangefeed
+// processors. To be removed in 24.1 after the new processor becomes the
+// default.
+var RangefeedSchedulerDisabled = envutil.EnvOrDefaultBool("COCKROACH_RANGEFEED_DISABLE_SCHEDULER",
+ false)
+
// RangefeedUseBufferedSender controls whether rangefeed uses a node level
// buffered sender to buffer events instead of buffering events separately in a
// channel at a per client per registration level. It is currently left
@@ -237,19 +242,16 @@ func (tp *rangefeedTxnPusher) Barrier(ctx context.Context) error {
// complete. The surrounding store's ConcurrentRequestLimiter is used to limit
// the number of rangefeeds using catch-up iterators at the same time.
func (r *Replica) RangeFeed(
- streamCtx context.Context,
- args *kvpb.RangeFeedRequest,
- stream rangefeed.Stream,
- pacer *admission.Pacer,
+ args *kvpb.RangeFeedRequest, stream rangefeed.Stream, pacer *admission.Pacer,
) error {
- streamCtx = r.AnnotateCtx(streamCtx)
+ ctx := r.AnnotateCtx(stream.Context())
rSpan, err := keys.SpanAddr(args.Span)
if err != nil {
return err
}
- if err := r.ensureClosedTimestampStarted(streamCtx); err != nil {
+ if err := r.ensureClosedTimestampStarted(ctx); err != nil {
return err.GoError()
}
@@ -281,7 +283,7 @@ func (r *Replica) RangeFeed(
iterSemRelease := func() {}
if !args.Timestamp.IsEmpty() {
usingCatchUpIter = true
- alloc, err := r.store.limiters.ConcurrentRangefeedIters.Begin(streamCtx)
+ alloc, err := r.store.limiters.ConcurrentRangefeedIters.Begin(ctx)
if err != nil {
return err
}
@@ -305,7 +307,7 @@ func (r *Replica) RangeFeed(
// critical-section as the registration is established. This ensures that
// the registration doesn't miss any events.
r.raftMu.Lock()
- if err := r.checkExecutionCanProceedForRangeFeed(streamCtx, rSpan, checkTS); err != nil {
+ if err := r.checkExecutionCanProceedForRangeFeed(ctx, rSpan, checkTS); err != nil {
r.raftMu.Unlock()
iterSemRelease()
return err
@@ -330,7 +332,7 @@ func (r *Replica) RangeFeed(
}
p, err := r.registerWithRangefeedRaftMuLocked(
- streamCtx, rSpan, args.Timestamp, catchUpIter, args.WithDiff, args.WithFiltering, omitRemote, stream,
+ ctx, rSpan, args.Timestamp, catchUpIter, args.WithDiff, args.WithFiltering, omitRemote, stream,
)
r.raftMu.Unlock()
@@ -420,7 +422,7 @@ func logSlowRangefeedRegistration(ctx context.Context) func() {
// iterator in case registration fails. Successful registration takes iterator
// ownership and ensures it is closed when catch up is complete or aborted.
func (r *Replica) registerWithRangefeedRaftMuLocked(
- streamCtx context.Context,
+ ctx context.Context,
span roachpb.RSpan,
startTS hlc.Timestamp, // exclusive
catchUpIter *rangefeed.CatchUpIterator,
@@ -429,7 +431,7 @@ func (r *Replica) registerWithRangefeedRaftMuLocked(
withOmitRemote bool,
stream rangefeed.Stream,
) (rangefeed.Processor, error) {
- defer logSlowRangefeedRegistration(streamCtx)()
+ defer logSlowRangefeedRegistration(ctx)()
// Always defer closing iterator to cover old and new failure cases.
// On successful path where registration succeeds reset catchUpIter to prevent
@@ -447,7 +449,7 @@ func (r *Replica) registerWithRangefeedRaftMuLocked(
p := r.rangefeedMu.proc
if p != nil {
- reg, filter := p.Register(streamCtx, span, startTS, catchUpIter, withDiff, withFiltering, withOmitRemote,
+ reg, filter := p.Register(span, startTS, catchUpIter, withDiff, withFiltering, withOmitRemote,
stream, func() { r.maybeDisconnectEmptyRangefeed(p) })
if reg {
// Registered successfully with an existing processor.
@@ -478,6 +480,11 @@ func (r *Replica) registerWithRangefeedRaftMuLocked(
// Create a new rangefeed.
feedBudget := r.store.GetStoreConfig().RangefeedBudgetFactory.CreateBudget(isSystemSpan)
+ var sched *rangefeed.Scheduler
+ if shouldUseRangefeedScheduler(&r.ClusterSettings().SV) {
+ sched = r.store.getRangefeedScheduler()
+ }
+
desc := r.Desc()
tp := rangefeedTxnPusher{ir: r.store.intentResolver, r: r, span: desc.RSpan()}
cfg := rangefeed.Config{
@@ -488,12 +495,13 @@ func (r *Replica) registerWithRangefeedRaftMuLocked(
RangeID: r.RangeID,
Span: desc.RSpan(),
TxnPusher: &tp,
+ PushTxnsInterval: r.store.TestingKnobs().RangeFeedPushTxnsInterval,
PushTxnsAge: r.store.TestingKnobs().RangeFeedPushTxnsAge,
EventChanCap: defaultEventChanCap,
EventChanTimeout: defaultEventChanTimeout,
Metrics: r.store.metrics.RangeFeedMetrics,
MemBudget: feedBudget,
- Scheduler: r.store.getRangefeedScheduler(),
+ Scheduler: sched,
Priority: isSystemSpan, // only takes effect when Scheduler != nil
}
p = rangefeed.NewProcessor(cfg)
@@ -506,7 +514,7 @@ func (r *Replica) registerWithRangefeedRaftMuLocked(
// waiting for the Register call below to return.
r.raftMu.AssertHeld()
- scanner, err := rangefeed.NewSeparatedIntentScanner(streamCtx, r.store.TODOEngine(), desc.RSpan())
+ scanner, err := rangefeed.NewSeparatedIntentScanner(ctx, r.store.TODOEngine(), desc.RSpan())
if err != nil {
stream.Disconnect(kvpb.NewError(err))
return nil
@@ -528,7 +536,7 @@ func (r *Replica) registerWithRangefeedRaftMuLocked(
// any other goroutines are able to stop the processor. In other words,
// this ensures that the only time the registration fails is during
// server shutdown.
- reg, filter := p.Register(streamCtx, span, startTS, catchUpIter, withDiff,
+ reg, filter := p.Register(span, startTS, catchUpIter, withDiff,
withFiltering, withOmitRemote, stream, func() { r.maybeDisconnectEmptyRangefeed(p) })
if !reg {
select {
@@ -548,7 +556,7 @@ func (r *Replica) registerWithRangefeedRaftMuLocked(
// Check for an initial closed timestamp update immediately to help
// initialize the rangefeed's resolved timestamp as soon as possible.
- r.handleClosedTimestampUpdateRaftMuLocked(streamCtx, r.GetCurrentClosedTimestamp(streamCtx))
+ r.handleClosedTimestampUpdateRaftMuLocked(ctx, r.GetCurrentClosedTimestamp(ctx))
return p, nil
}
@@ -942,6 +950,10 @@ func (r *Replica) ensureClosedTimestampStarted(ctx context.Context) *kvpb.Error
return nil
}
+func shouldUseRangefeedScheduler(sv *settings.Values) bool {
+ return RangeFeedUseScheduler.Get(sv) && !RangefeedSchedulerDisabled
+}
+
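The gate above combines a cluster setting with a process-wide environment kill switch, and the kill switch wins even when the setting is enabled. A toy standalone sketch of that double gate (names are illustrative; the real env var is read once via envutil at startup):

package main

import (
	"fmt"
	"os"
)

// killSwitch mirrors COCKROACH_RANGEFEED_DISABLE_SCHEDULER: read once at
// startup, it vetoes the cluster setting. TOY_DISABLE_SCHEDULER is made up.
var killSwitch = os.Getenv("TOY_DISABLE_SCHEDULER") == "true"

func shouldUseScheduler(settingEnabled bool) bool {
	return settingEnabled && !killSwitch
}

func main() {
	// Prints false when run with TOY_DISABLE_SCHEDULER=true, despite the
	// setting being on.
	fmt.Println(shouldUseScheduler(true))
}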
// TestGetReplicaRangefeedProcessor exposes rangefeed processor for test
// introspection. Note that while retrieving processor is threadsafe, invoking
// processor methods should be done with caution to not break any invariants.
diff --git a/pkg/kv/kvserver/replica_rangefeed_test.go b/pkg/kv/kvserver/replica_rangefeed_test.go
index dede39d702fb..013fdd9a049d 100644
--- a/pkg/kv/kvserver/replica_rangefeed_test.go
+++ b/pkg/kv/kvserver/replica_rangefeed_test.go
@@ -16,11 +16,13 @@ import (
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvcoord"
+ clientrf "github.com/cockroachdb/cockroach/pkg/kv/kvclient/rangefeed"
"github.com/cockroachdb/cockroach/pkg/kv/kvpb"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/closedts"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverpb"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/liveness"
+ "github.com/cockroachdb/cockroach/pkg/kv/kvserver/rangefeed"
"github.com/cockroachdb/cockroach/pkg/raft/raftpb"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/server"
@@ -63,6 +65,10 @@ func (s *testStream) SetHeader(metadata.MD) error { panic("unimplemented") }
func (s *testStream) SendHeader(metadata.MD) error { panic("unimplemented") }
func (s *testStream) SetTrailer(metadata.MD) { panic("unimplemented") }
+func (s *testStream) Context() context.Context {
+ return s.ctx
+}
+
func (s *testStream) Cancel() {
s.cancel()
}
@@ -104,7 +110,7 @@ func (s *testStream) WaitForError(t *testing.T) error {
func waitRangeFeed(
t *testing.T, store *kvserver.Store, req *kvpb.RangeFeedRequest, stream *testStream,
) error {
- if err := store.RangeFeed(stream.ctx, req, stream); err != nil {
+ if err := store.RangeFeed(req, stream); err != nil {
return err
}
return stream.WaitForError(t)
@@ -727,6 +733,61 @@ func TestReplicaRangefeedOriginIDFiltering(t *testing.T) {
})
}
+func TestScheduledProcessorKillSwitch(t *testing.T) {
+ defer leaktest.AfterTest(t)()
+ defer log.Scope(t).Close(t)
+
+ kvserver.RangefeedSchedulerDisabled = true
+ defer func() { kvserver.RangefeedSchedulerDisabled = false }()
+
+ ctx := context.Background()
+ ts, err := serverutils.NewServer(base.TestServerArgs{
+ DefaultTestTenant: base.TestIsSpecificToStorageLayerAndNeedsASystemTenant,
+ })
+ require.NoError(t, err, "failed to start test server")
+ require.NoError(t, ts.Start(ctx), "start server")
+ defer ts.Stopper().Stop(ctx)
+
+ db := ts.SystemLayer().SQLConn(t)
+ _, err = db.Exec("set cluster setting kv.rangefeed.enabled = t")
+ require.NoError(t, err, "can't enable rangefeeds")
+ _, err = db.Exec("set cluster setting kv.rangefeed.scheduler.enabled = t")
+ require.NoError(t, err, "can't enable rangefeed scheduler")
+
+ sr, err := ts.ScratchRange()
+ require.NoError(t, err, "can't create scratch range")
+ f := ts.RangeFeedFactory().(*clientrf.Factory)
+ rf, err := f.RangeFeed(ctx, "test-feed", []roachpb.Span{{Key: sr, EndKey: sr.PrefixEnd()}},
+ hlc.Timestamp{},
+ func(ctx context.Context, value *kvpb.RangeFeedValue) {},
+ )
+ require.NoError(t, err, "failed to start rangefeed")
+ defer rf.Close()
+
+ rd, err := ts.LookupRange(sr)
+ require.NoError(t, err, "failed to get descriptor for scratch range")
+
+ stores := ts.GetStores().(*kvserver.Stores)
+ _ = stores.VisitStores(func(s *kvserver.Store) error {
+ repl, err := s.GetReplica(rd.RangeID)
+ require.NoError(t, err, "failed to find scratch range replica in store")
+ var proc rangefeed.Processor
// Note that we can't rely on a checkpoint or event here because the client
// rangefeed call can return and emit the first checkpoint and data before
// the processor is actually attached to the replica.
+ testutils.SucceedsSoon(t, func() error {
+ proc = kvserver.TestGetReplicaRangefeedProcessor(repl)
+ if proc == nil {
+ return errors.New("scratch range must have processor")
+ }
+ return nil
+ })
+ require.IsType(t, (*rangefeed.LegacyProcessor)(nil), proc,
+ "kill switch didn't prevent scheduled processor creation")
+ return nil
+ })
+}
+
func TestReplicaRangefeedErrors(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
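TestScheduledProcessorKillSwitch above polls for the processor via testutils.SucceedsSoon because, as its comment notes, the client rangefeed can return before the processor is attached to the replica. A simplified stand-in for that retry loop, assuming a fixed 10ms poll interval and an explicit timeout (the real helper also applies backoff):

package kvserver_test

import (
	"testing"
	"time"
)

// waitFor polls fn until it returns nil or the timeout elapses, failing the
// test otherwise. A simplified stand-in for testutils.SucceedsSoon.
func waitFor(t *testing.T, timeout time.Duration, fn func() error) {
	t.Helper()
	deadline := time.Now().Add(timeout)
	for {
		err := fn()
		if err == nil {
			return
		}
		if time.Now().After(deadline) {
			t.Fatalf("condition not met within %s: %v", timeout, err)
		}
		time.Sleep(10 * time.Millisecond)
	}
}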
diff --git a/pkg/kv/kvserver/replica_store_liveness.go b/pkg/kv/kvserver/replica_store_liveness.go
index 7f879265da50..fe0f518f431d 100644
--- a/pkg/kv/kvserver/replica_store_liveness.go
+++ b/pkg/kv/kvserver/replica_store_liveness.go
@@ -21,6 +21,8 @@ import (
"github.com/cockroachdb/cockroach/pkg/util/log"
)
+// RaftLeaderFortificationFractionEnabled controls the fraction of ranges for
+// which the raft leader fortification protocol is enabled.
var RaftLeaderFortificationFractionEnabled = settings.RegisterFloatSetting(
settings.SystemOnly,
"kv.raft.leader_fortification.fraction_enabled",
diff --git a/pkg/kv/kvserver/replica_test.go b/pkg/kv/kvserver/replica_test.go
index 09a7a07e5364..1b307a9cab69 100644
--- a/pkg/kv/kvserver/replica_test.go
+++ b/pkg/kv/kvserver/replica_test.go
@@ -8189,7 +8189,7 @@ func TestReplicaRefreshPendingCommandsTicks(t *testing.T) {
r.mu.Unlock()
// We tick the replica 3*RaftReproposalTimeoutTicks.
- for i := int64(0); i < 3*reproposalTicks; i++ {
+ for i := 0; i < 3*reproposalTicks; i++ {
// Add another pending command on each iteration.
id := fmt.Sprintf("%08d", i)
ba := &kvpb.BatchRequest{}
@@ -8245,7 +8245,7 @@ func TestReplicaRefreshPendingCommandsTicks(t *testing.T) {
// time, this will be 1 reproposal (the one at ticks=0 for the reproposal at
// ticks=reproposalTicks), then +reproposalTicks reproposals each time.
if (ticks % reproposalTicks) == 0 {
- if exp := i + 2 - reproposalTicks; int64(len(reproposed)) != exp { // +1 to offset i, +1 for inclusive
+ if exp := i + 2 - reproposalTicks; len(reproposed) != exp { // +1 to offset i, +1 for inclusive
t.Fatalf("%d: expected %d reproposed commands, but found %d", i, exp, len(reproposed))
}
} else {
@@ -9860,7 +9860,7 @@ type testQuiescer struct {
numProposals int
pendingQuota bool
sendTokens bool
- ticksSinceLastProposal int64
+ ticksSinceLastProposal int
status *raft.SparseStatus
lastIndex kvpb.RaftIndex
raftReady bool
@@ -9917,7 +9917,7 @@ func (q *testQuiescer) hasSendTokensRaftMuLockedReplicaMuLocked() bool {
return q.sendTokens
}
-func (q *testQuiescer) ticksSinceLastProposalRLocked() int64 {
+func (q *testQuiescer) ticksSinceLastProposalRLocked() int {
return q.ticksSinceLastProposal
}
diff --git a/pkg/kv/kvserver/scheduler.go b/pkg/kv/kvserver/scheduler.go
index 12e0d553b5e4..b62f5d298208 100644
--- a/pkg/kv/kvserver/scheduler.go
+++ b/pkg/kv/kvserver/scheduler.go
@@ -149,7 +149,7 @@ type raftScheduleState struct {
// TODO(pavelkalinnikov): add a node health metric for the ticks.
//
// INVARIANT: flags&stateRaftTick == 0 iff ticks == 0.
- ticks int64
+ ticks int
}
var raftSchedulerBatchPool = sync.Pool{
@@ -230,7 +230,7 @@ type raftSchedulerShard struct {
queue rangeIDQueue
state map[roachpb.RangeID]raftScheduleState
numWorkers int
- maxTicks int64
+ maxTicks int
stopped bool
}
@@ -241,7 +241,7 @@ func newRaftScheduler(
numWorkers int,
shardSize int,
priorityWorkers int,
- maxTicks int64,
+ maxTicks int,
) *raftScheduler {
s := &raftScheduler{
ambientContext: ambient,
@@ -273,7 +273,7 @@ func newRaftScheduler(
return s
}
-func newRaftSchedulerShard(numWorkers int, maxTicks int64) *raftSchedulerShard {
+func newRaftSchedulerShard(numWorkers, maxTicks int) *raftSchedulerShard {
shard := &raftSchedulerShard{
state: map[roachpb.RangeID]raftScheduleState{},
numWorkers: numWorkers,
@@ -465,7 +465,7 @@ func (s *raftScheduler) NewEnqueueBatch() *raftSchedulerBatch {
func (ss *raftSchedulerShard) enqueue1Locked(
addFlags raftScheduleFlags, id roachpb.RangeID, now int64,
) int {
- ticks := int64((addFlags & stateRaftTick) / stateRaftTick) // 0 or 1
+ ticks := int((addFlags & stateRaftTick) / stateRaftTick) // 0 or 1
prevState := ss.state[id]
if prevState.flags&addFlags == addFlags && ticks == 0 {
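The `(addFlags & stateRaftTick) / stateRaftTick` expression above turns one flag bit into a 0-or-1 tick count without a branch: masking isolates the bit, and dividing by the flag's own value normalizes whatever bit position it occupies down to 1. A self-contained illustration; the flag layout here is assumed for the example:

package main

import "fmt"

type raftScheduleFlags int

const (
	stateQueued raftScheduleFlags = 1 << iota // assumed layout, for illustration
	stateRaftReady
	stateRaftTick
)

// tickCount maps {0, stateRaftTick} to {0, 1} with mask-then-divide.
func tickCount(flags raftScheduleFlags) int {
	return int((flags & stateRaftTick) / stateRaftTick)
}

func main() {
	fmt.Println(tickCount(stateRaftReady))                 // 0
	fmt.Println(tickCount(stateRaftTick | stateRaftReady)) // 1
}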
diff --git a/pkg/kv/kvserver/store.go b/pkg/kv/kvserver/store.go
index fbeca39b7c2d..46867e63aee3 100644
--- a/pkg/kv/kvserver/store.go
+++ b/pkg/kv/kvserver/store.go
@@ -3240,9 +3240,7 @@ func (s *Store) Descriptor(ctx context.Context, useCached bool) (*roachpb.StoreD
// RangeFeed registers a rangefeed over the specified span. It sends updates to
// the provided stream and returns a future with an optional error when the rangefeed is
// complete.
-func (s *Store) RangeFeed(
- streamCtx context.Context, args *kvpb.RangeFeedRequest, stream rangefeed.Stream,
-) error {
+func (s *Store) RangeFeed(args *kvpb.RangeFeedRequest, stream rangefeed.Stream) error {
if filter := s.TestingKnobs().TestingRangefeedFilter; filter != nil {
if pErr := filter(args, stream); pErr != nil {
return pErr.GoError()
@@ -3269,7 +3267,7 @@ func (s *Store) RangeFeed(
tenID, _ := repl.TenantID()
pacer := s.cfg.KVAdmissionController.AdmitRangefeedRequest(tenID, args)
- return repl.RangeFeed(streamCtx, args, stream, pacer)
+ return repl.RangeFeed(args, stream, pacer)
}
// updateReplicationGauges counts a number of simple replication statistics for
@@ -4098,12 +4096,6 @@ func (s *Store) unregisterLeaseholderByID(ctx context.Context, rangeID roachpb.R
}
}
-// TestingStoreLivenessMessageHandler returns the store's store liveness
-// message handler for testing purposes.
-func (s *Store) TestingStoreLivenessMessageHandler() storeliveness.MessageHandler {
- return s.storeLiveness.(*storeliveness.SupportManager)
-}
-
// getRootMemoryMonitorForKV returns a BytesMonitor to use for KV memory
// tracking.
func (s *Store) getRootMemoryMonitorForKV() *mon.BytesMonitor {
diff --git a/pkg/kv/kvserver/storeliveness/BUILD.bazel b/pkg/kv/kvserver/storeliveness/BUILD.bazel
index b885f35300e5..29fb50c206da 100644
--- a/pkg/kv/kvserver/storeliveness/BUILD.bazel
+++ b/pkg/kv/kvserver/storeliveness/BUILD.bazel
@@ -11,7 +11,6 @@ go_library(
"support_manager.go",
"supporter_state.go",
"transport.go",
- "unreliable_store_liveness_handler.go",
],
importpath = "github.com/cockroachdb/cockroach/pkg/kv/kvserver/storeliveness",
visibility = ["//visibility:public"],
@@ -33,6 +32,7 @@ go_library(
"//pkg/util/syncutil",
"//pkg/util/timeutil",
"@com_github_cockroachdb_errors//:errors",
+ "@com_github_cockroachdb_redact//:redact",
"@org_golang_google_grpc//:grpc",
"@org_golang_x_exp//maps",
],
diff --git a/pkg/kv/kvserver/storeliveness/requester_state.go b/pkg/kv/kvserver/storeliveness/requester_state.go
index 31320f80ebef..48f14aad18db 100644
--- a/pkg/kv/kvserver/storeliveness/requester_state.go
+++ b/pkg/kv/kvserver/storeliveness/requester_state.go
@@ -13,7 +13,9 @@ import (
slpb "github.com/cockroachdb/cockroach/pkg/kv/kvserver/storeliveness/storelivenesspb"
"github.com/cockroachdb/cockroach/pkg/storage"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
+ "github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/syncutil"
+ "github.com/cockroachdb/redact"
)
// requesterState stores the core data structures for requesting support.
@@ -66,7 +68,7 @@ const (
// stores. The typical interactions with requesterStateHandler are:
// - getSupportFrom(id slpb.StoreIdent)
// - addStore(id slpb.StoreIdent)
-// - removeStore(id slpb.StoreIdent)
+// - markIdleStores()
// - rsfu := checkOutUpdate()
// rsfu.getHeartbeatsToSend(now hlc.Timestamp, interval time.Duration)
// checkInUpdate(rsfu)
@@ -135,18 +137,19 @@ type requesterStateForUpdate struct {
// requesterState.supportFrom. The returned exists boolean indicates whether
// the given store is present in the supportFrom map; it does NOT indicate
// whether support from that store is provided. wasIdle reports whether the
// store had been marked idle and is now being queried again.
-func (rsh *requesterStateHandler) getSupportFrom(id slpb.StoreIdent) (slpb.SupportState, bool) {
+func (rsh *requesterStateHandler) getSupportFrom(
+ id slpb.StoreIdent,
+) (supportState slpb.SupportState, exists bool, wasIdle bool) {
rsh.mu.RLock()
defer rsh.mu.RUnlock()
- rs, ok := rsh.requesterState.supportFrom[id]
- var supportState slpb.SupportState
- if ok {
+ rs, exists := rsh.requesterState.supportFrom[id]
+ if exists {
// If a store is present, set recentlyQueried to active. Otherwise, if
// this is a new store, recentlyQueried will be set to active in addStore.
- rs.recentlyQueried.Store(active)
+ wasIdle = rs.recentlyQueried.Swap(active) == idle
supportState = rs.state
}
- return supportState, ok
+ return supportState, exists, wasIdle
}
// exportAllSupportFrom exports a copy of all SupportStates from the
@@ -176,7 +179,7 @@ func (rsh *requesterStateHandler) addStore(id slpb.StoreIdent) bool {
}
// Adding a store is done in response to SupportFrom, so it's ok to set
// recentlyQueried to active here. This also ensures the store will not
- // be removed immediately after adding.
+ // be marked as idle immediately after adding.
rs.recentlyQueried.Store(active)
rsh.requesterState.supportFrom[id] = &rs
return true
@@ -187,7 +190,7 @@ func (rsh *requesterStateHandler) addStore(id slpb.StoreIdent) bool {
// markIdleStores marks all stores in the requesterState.supportFrom map as
// idle if they have not appeared in a getSupportFrom call since the last time
// markIdleStores was called.
-func (rsh *requesterStateHandler) markIdleStores() {
+func (rsh *requesterStateHandler) markIdleStores(ctx context.Context) {
// Marking stores doesn't require persisting anything to disk, so it doesn't
// need to go through the full checkOut/checkIn process. However, we still
// check out the update to ensure that there are no concurrent updates.
@@ -197,7 +200,9 @@ func (rsh *requesterStateHandler) markIdleStores() {
defer rsh.mu.RUnlock()
for _, rs := range rsh.requesterState.supportFrom {
if !rs.recentlyQueried.CompareAndSwap(active, inactive) {
- rs.recentlyQueried.CompareAndSwap(inactive, idle)
+ if rs.recentlyQueried.CompareAndSwap(inactive, idle) {
+ log.Infof(ctx, "stopping heartbeats to idle store %+v", rs.state.Target)
+ }
}
}
}
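Taken together, getSupportFrom and markIdleStores above implement a two-sweep idleness detector over an atomic three-state value: one sweep demotes active to inactive, a second consecutive sweep demotes inactive to idle (and now logs it), and any query promotes the value straight back to active, reporting wasIdle when the store had already been demoted twice. A self-contained sketch of that state machine; the concrete constant values are assumed for the example:

package main

import (
	"fmt"
	"sync/atomic"
)

const (
	active   int32 = iota // queried since the last sweep
	inactive              // missed one sweep
	idle                  // missed two consecutive sweeps; heartbeats stop
)

type store struct{ recentlyQueried atomic.Int32 }

// query promotes the store to active and reports whether it had gone idle.
func (s *store) query() (wasIdle bool) {
	return s.recentlyQueried.Swap(active) == idle
}

// sweep demotes active->inactive, then inactive->idle on the next pass.
func (s *store) sweep() (becameIdle bool) {
	if !s.recentlyQueried.CompareAndSwap(active, inactive) {
		return s.recentlyQueried.CompareAndSwap(inactive, idle)
	}
	return false
}

func main() {
	var s store
	fmt.Println(s.sweep()) // false: active -> inactive
	fmt.Println(s.sweep()) // true: inactive -> idle
	fmt.Println(s.query()) // true: was idle; now active again
}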
@@ -370,7 +375,9 @@ func (rsfu *requesterStateForUpdate) generateHeartbeats(from slpb.StoreIdent) []
// handleHeartbeatResponse handles a single heartbeat response message. It
// updates the inProgress view of requesterStateForUpdate only if there are any
// changes.
-func (rsfu *requesterStateForUpdate) handleHeartbeatResponse(msg *slpb.Message) {
+func (rsfu *requesterStateForUpdate) handleHeartbeatResponse(
+ ctx context.Context, msg *slpb.Message,
+) {
from := msg.From
meta := rsfu.getMeta()
ss, ok := rsfu.getSupportFrom(from)
@@ -385,6 +392,7 @@ func (rsfu *requesterStateForUpdate) handleHeartbeatResponse(msg *slpb.Message)
}
if ss != ssNew {
rsfu.inProgress.supportFrom[from] = &requestedSupport{state: ssNew}
+ logSupportFromChange(ctx, ss, ssNew)
}
}
@@ -399,20 +407,30 @@ func handleHeartbeatResponse(
if ss.Epoch == msg.Epoch {
ss.Expiration.Forward(msg.Expiration)
} else if ss.Epoch < msg.Epoch {
- assert(
- ss.Epoch == msg.Epoch-1,
- "the supporter epoch leads the requester epoch by more than 1",
- )
+ assert(ss.Epoch == msg.Epoch-1, "epoch incremented by more than 1")
ss.Epoch = msg.Epoch
- assert(
- msg.Expiration == hlc.Timestamp{},
- "the supporter responded with an incremented epoch but non-zero timestamp",
- )
+ assert(msg.Expiration == hlc.Timestamp{}, "incremented epoch but non-zero timestamp")
ss.Expiration = msg.Expiration
}
return rm, ss
}
+// logSupportFromChange logs the old and new support state after handling a
+// heartbeat response. The logic mirrors that in handleHeartbeatResponse and
+// uses the same assertions.
+func logSupportFromChange(ctx context.Context, ss slpb.SupportState, ssNew slpb.SupportState) {
+ if ss.Epoch == ssNew.Epoch {
+ if ss.Expiration.IsEmpty() {
+ log.Infof(ctx, "received support from %s", supportChangeStr(ss, ssNew))
+ } else if log.ExpensiveLogEnabled(ctx, 3) {
+ log.VInfof(ctx, 3, "extended support from %s", supportChangeStr(ss, ssNew))
+ }
+ } else {
+ assert(ss.Epoch < ssNew.Epoch, "epoch regressed")
+ log.Infof(ctx, "lost support from %s", supportChangeStr(ss, ssNew))
+ }
+}
+
// Functions for incrementing MaxEpoch.
// incrementMaxEpoch increments the inProgress view of MaxEpoch.
@@ -428,3 +446,11 @@ func assert(condition bool, msg string) {
panic(msg)
}
}
+
+func supportChangeStr(
+	prev slpb.SupportState, cur slpb.SupportState,
+) redact.RedactableString {
+	// The first argument is the pre-update state and the second the
+	// post-update state, matching the (ss, ssNew) order at all call sites.
+	return redact.Sprintf(
+		"store %+v; current = %+v, previous = %+v", cur.Target, cur, prev,
+	)
+}
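supportChangeStr returns a redact.RedactableString rather than a plain string so the variable parts of the message stay inside redaction markers all the way to the log. A hedged sketch of the cockroachdb/redact API in isolation (the printed shapes are approximate):

package main

import (
	"fmt"

	"github.com/cockroachdb/redact"
)

func main() {
	// Literal format text is treated as safe; arguments are unsafe by
	// default and get wrapped in redaction markers.
	s := redact.Sprintf("withdrew support from %d stores", 3)
	fmt.Println(s)          // roughly: withdrew support from ‹3›
	fmt.Println(s.Redact()) // roughly: withdrew support from ‹×›
}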
diff --git a/pkg/kv/kvserver/storeliveness/store_liveness_test.go b/pkg/kv/kvserver/storeliveness/store_liveness_test.go
index e6a07451d749..2fc69393bc44 100644
--- a/pkg/kv/kvserver/storeliveness/store_liveness_test.go
+++ b/pkg/kv/kvserver/storeliveness/store_liveness_test.go
@@ -49,7 +49,7 @@ func TestStoreLiveness(t *testing.T) {
t, path, func(t *testing.T, d *datadriven.TestData) string {
switch d.Cmd {
case "mark-idle-stores":
- sm.requesterStateHandler.markIdleStores()
+ sm.requesterStateHandler.markIdleStores(ctx)
return ""
case "support-from":
@@ -66,7 +66,7 @@ func TestStoreLiveness(t *testing.T) {
now := parseTimestamp(t, d, "now")
manual.AdvanceTo(now.GoTime())
sm.options.LivenessInterval = parseDuration(t, d, "liveness-interval")
- sm.maybeAddStores()
+ sm.maybeAddStores(ctx)
sm.sendHeartbeats(ctx)
heartbeats := sender.drainSentMessages()
return fmt.Sprintf("heartbeats:\n%s", printMsgs(heartbeats))
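The mark-idle-stores and send-heartbeats directives above are dispatched by the datadriven harness, which walks a testdata file, hands each directive to the supplied closure, and diffs the returned string against the recorded output; the requester_state testdata changes further down follow the same format. A minimal sketch of that harness shape, with an invented echo directive:

package storeliveness_test

import (
	"testing"

	"github.com/cockroachdb/datadriven"
)

func TestDataDrivenShape(t *testing.T) {
	datadriven.RunTest(t, "testdata/example",
		func(t *testing.T, d *datadriven.TestData) string {
			switch d.Cmd {
			case "echo": // invented directive, for illustration only
				return d.Input + "\n"
			default:
				t.Fatalf("unknown command %q", d.Cmd)
				return ""
			}
		})
}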
diff --git a/pkg/kv/kvserver/storeliveness/support_manager.go b/pkg/kv/kvserver/storeliveness/support_manager.go
index 45730e860339..244f67a77aea 100644
--- a/pkg/kv/kvserver/storeliveness/support_manager.go
+++ b/pkg/kv/kvserver/storeliveness/support_manager.go
@@ -124,7 +124,7 @@ func (sm *SupportManager) InspectSupportFor() slpb.SupportStatesPerStore {
// SupportFrom implements the Fabric interface. It delegates the response to the
// SupportManager's supporterStateHandler.
func (sm *SupportManager) SupportFrom(id slpb.StoreIdent) (slpb.Epoch, hlc.Timestamp) {
- ss, ok := sm.requesterStateHandler.getSupportFrom(id)
+ ss, ok, wasIdle := sm.requesterStateHandler.getSupportFrom(id)
if !ok {
// If this is the first time SupportFrom has been called for this store,
// the store will be added to requesterStateHandler before the next
@@ -133,11 +133,15 @@ func (sm *SupportManager) SupportFrom(id slpb.StoreIdent) (slpb.Epoch, hlc.Times
// uses a map to avoid duplicates, and the requesterStateHandler's
// addStore checks if the store exists before adding it.
sm.storesToAdd.addStore(id)
- log.VInfof(
- context.Background(), 2, "SupportFrom called for the first time with store id %+v", id,
- )
+ log.VInfof(context.TODO(), 2, "store %+v is not heartbeating store %+v yet", sm.storeID, id)
return 0, hlc.Timestamp{}
}
+ if wasIdle {
+ log.Infof(
+ context.TODO(), "store %+v is starting to heartbeat store %+v (after being idle)",
+ sm.storeID, id,
+ )
+ }
return ss.Epoch, ss.Expiration
}
@@ -232,7 +236,7 @@ func (sm *SupportManager) startLoop(ctx context.Context) {
select {
case <-sm.storesToAdd.sig:
- sm.maybeAddStores()
+ sm.maybeAddStores(ctx)
sm.sendHeartbeats(ctx)
case <-heartbeatTicker.C:
@@ -242,7 +246,7 @@ func (sm *SupportManager) startLoop(ctx context.Context) {
sm.withdrawSupport(ctx)
case <-idleSupportFromTicker.C:
- sm.requesterStateHandler.markIdleStores()
+ sm.requesterStateHandler.markIdleStores(ctx)
case <-receiveQueueSig:
// Decrementing the queue metrics is done in handleMessages.
@@ -257,14 +261,11 @@ func (sm *SupportManager) startLoop(ctx context.Context) {
// maybeAddStores drains storesToAdd and delegates adding any new stores to the
// SupportManager's requesterStateHandler.
-func (sm *SupportManager) maybeAddStores() {
+func (sm *SupportManager) maybeAddStores(ctx context.Context) {
sta := sm.storesToAdd.drainStoresToAdd()
for _, store := range sta {
if sm.requesterStateHandler.addStore(store) {
- log.VInfof(
- context.Background(), 2, "store %+v is starting to request support from store %+v",
- sm.storeID, store,
- )
+ log.Infof(ctx, "starting to heartbeat store %+v", store)
sm.metrics.SupportFromStores.Inc(1)
}
}
@@ -294,12 +295,12 @@ func (sm *SupportManager) sendHeartbeats(ctx context.Context) {
if sent := sm.sender.SendAsync(ctx, msg); sent {
successes++
} else {
- log.Warningf(ctx, "sending heartbeat to store %+v failed", msg.To)
+ log.Warningf(ctx, "failed to send heartbeat to store %+v", msg.To)
}
}
sm.metrics.HeartbeatSuccesses.Inc(int64(successes))
sm.metrics.HeartbeatFailures.Inc(int64(len(heartbeats) - successes))
- log.VInfof(ctx, 2, "store %+v sent heartbeats to %d stores", sm.storeID, successes)
+ log.VInfof(ctx, 2, "sent heartbeats to %d stores", successes)
}
// withdrawSupport delegates support withdrawal to supporterStateHandler.
@@ -311,7 +312,7 @@ func (sm *SupportManager) withdrawSupport(ctx context.Context) {
}
ssfu := sm.supporterStateHandler.checkOutUpdate()
defer sm.supporterStateHandler.finishUpdate(ssfu)
- numWithdrawn := ssfu.withdrawSupport(now)
+ numWithdrawn := ssfu.withdrawSupport(ctx, now)
if numWithdrawn == 0 {
// No support to withdraw.
return
@@ -330,7 +331,7 @@ func (sm *SupportManager) withdrawSupport(ctx context.Context) {
return
}
sm.supporterStateHandler.checkInUpdate(ssfu)
- log.VInfof(ctx, 2, "store %+v withdrew support from %d stores", sm.storeID, numWithdrawn)
+ log.Infof(ctx, "withdrew support from %d stores", numWithdrawn)
sm.metrics.SupportWithdrawSuccesses.Inc(int64(numWithdrawn))
}
@@ -338,7 +339,7 @@ func (sm *SupportManager) withdrawSupport(ctx context.Context) {
// to either the requesterStateHandler or supporterStateHandler. It then writes
// all updates to disk in a single batch, and sends any responses via Transport.
func (sm *SupportManager) handleMessages(ctx context.Context, msgs []*slpb.Message) {
- log.VInfof(ctx, 2, "store %+v drained receive queue of size %d", sm.storeID, len(msgs))
+ log.VInfof(ctx, 2, "drained receive queue of size %d", len(msgs))
rsfu := sm.requesterStateHandler.checkOutUpdate()
defer sm.requesterStateHandler.finishUpdate(rsfu)
ssfu := sm.supporterStateHandler.checkOutUpdate()
@@ -349,11 +350,11 @@ func (sm *SupportManager) handleMessages(ctx context.Context, msgs []*slpb.Messa
sm.metrics.ReceiveQueueBytes.Dec(int64(msg.Size()))
switch msg.Type {
case slpb.MsgHeartbeat:
- responses = append(responses, ssfu.handleHeartbeat(msg))
+ responses = append(responses, ssfu.handleHeartbeat(ctx, msg))
case slpb.MsgHeartbeatResp:
- rsfu.handleHeartbeatResponse(msg)
+ rsfu.handleHeartbeatResponse(ctx, msg)
default:
- log.Errorf(context.Background(), "unexpected message type: %v", msg.Type)
+ log.Errorf(ctx, "unexpected message type: %v", msg.Type)
}
}
@@ -383,7 +384,7 @@ func (sm *SupportManager) handleMessages(ctx context.Context, msgs []*slpb.Messa
for _, response := range responses {
_ = sm.sender.SendAsync(ctx, response)
}
- log.VInfof(ctx, 2, "store %+v sent %d responses", sm.storeID, len(responses))
+ log.VInfof(ctx, 2, "sent %d heartbeat responses", len(responses))
}
// maxReceiveQueueSize is the maximum number of messages the receive queue can
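A recurring change in this file is dropping `store %+v` from log messages while threading the loop's ctx into maybeAddStores, markIdleStores, and handleMessages instead of using context.Background(). That works because the log package renders tags carried on the context (node, store, and so on) as a message prefix, so the store ID no longer needs to be spelled out per call. A small sketch of the underlying logtags mechanism; the "s" key mirrors the store tag and the rendered form is approximate:

package main

import (
	"context"
	"fmt"

	"github.com/cockroachdb/logtags"
)

func main() {
	ctx := logtags.AddTag(context.Background(), "s", 1)
	// A callee receiving this ctx inherits the tag; one that substitutes
	// context.Background() loses it, which is what the changes above fix.
	fmt.Println(logtags.FromContext(ctx)) // roughly: s1
}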
diff --git a/pkg/kv/kvserver/storeliveness/support_manager_test.go b/pkg/kv/kvserver/storeliveness/support_manager_test.go
index 4727598d4581..d3c5c5bc59ad 100644
--- a/pkg/kv/kvserver/storeliveness/support_manager_test.go
+++ b/pkg/kv/kvserver/storeliveness/support_manager_test.go
@@ -247,7 +247,7 @@ func TestSupportManagerRestart(t *testing.T) {
manual.Pause()
manualBehind.Pause()
sm.SupportFrom(remoteStore)
- sm.maybeAddStores()
+ sm.maybeAddStores(ctx)
sm.sendHeartbeats(ctx)
requestedTime := sm.requesterStateHandler.requesterState.meta.MaxRequested
heartbeatResp := &slpb.Message{
@@ -311,7 +311,7 @@ func TestSupportManagerDiskStall(t *testing.T) {
// Establish support for and from the remote store.
sm.SupportFrom(remoteStore)
- sm.maybeAddStores()
+ sm.maybeAddStores(ctx)
sm.sendHeartbeats(ctx)
requestedTime := sm.requesterStateHandler.requesterState.meta.MaxRequested
heartbeatResp := &slpb.Message{
diff --git a/pkg/kv/kvserver/storeliveness/supporter_state.go b/pkg/kv/kvserver/storeliveness/supporter_state.go
index b2edc08037e7..ac0cd49bf211 100644
--- a/pkg/kv/kvserver/storeliveness/supporter_state.go
+++ b/pkg/kv/kvserver/storeliveness/supporter_state.go
@@ -12,6 +12,7 @@ import (
slpb "github.com/cockroachdb/cockroach/pkg/kv/kvserver/storeliveness/storelivenesspb"
"github.com/cockroachdb/cockroach/pkg/storage"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
+ "github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/syncutil"
)
@@ -248,7 +249,9 @@ func (ssh *supporterStateHandler) finishUpdate(ssfu *supporterStateForUpdate) {
// handleHeartbeat handles a single heartbeat message. It updates the inProgress
// view of supporterStateForUpdate only if there are any changes, and returns
// a heartbeat response message.
-func (ssfu *supporterStateForUpdate) handleHeartbeat(msg *slpb.Message) slpb.Message {
+func (ssfu *supporterStateForUpdate) handleHeartbeat(
+ ctx context.Context, msg *slpb.Message,
+) slpb.Message {
from := msg.From
ss, ok := ssfu.getSupportFor(from)
if !ok {
@@ -257,6 +260,7 @@ func (ssfu *supporterStateForUpdate) handleHeartbeat(msg *slpb.Message) slpb.Mes
ssNew := handleHeartbeat(ss, msg)
if ss != ssNew {
ssfu.inProgress.supportFor[from] = ssNew
+ logSupportForChange(ctx, ss, ssNew)
}
return slpb.Message{
Type: slpb.MsgHeartbeatResp,
@@ -270,24 +274,38 @@ func (ssfu *supporterStateForUpdate) handleHeartbeat(msg *slpb.Message) slpb.Mes
// handleHeartbeat contains the core logic for updating the epoch and expiration
// of a support requester upon receiving a heartbeat.
func handleHeartbeat(ss slpb.SupportState, msg *slpb.Message) slpb.SupportState {
+ assert(!msg.Expiration.IsEmpty(), "requested support with zero expiration")
if ss.Epoch == msg.Epoch {
ss.Expiration.Forward(msg.Expiration)
} else if ss.Epoch < msg.Epoch {
- assert(
- ss.Expiration.Less(msg.Expiration), "support expiration regression across epochs",
- )
+ assert(ss.Expiration.Less(msg.Expiration), "support expiration regression across epochs")
ss.Epoch = msg.Epoch
ss.Expiration = msg.Expiration
}
return ss
}
+// logSupportForChange logs the old and new support state after handling a
+// heartbeat.
+func logSupportForChange(ctx context.Context, ss slpb.SupportState, ssNew slpb.SupportState) {
+ assert(!ssNew.Expiration.IsEmpty(), "requested support with zero expiration")
+ if ss.Epoch == ssNew.Epoch && !ss.Expiration.IsEmpty() {
+ if log.ExpensiveLogEnabled(ctx, 3) {
+ log.VInfof(ctx, 3, "extended support for %s", supportChangeStr(ss, ssNew))
+ }
+ } else {
+ log.Infof(ctx, "provided support for %s", supportChangeStr(ss, ssNew))
+ }
+}
+
// Functions for withdrawing support.
// withdrawSupport handles a single support withdrawal. It updates the
// inProgress view of supporterStateForUpdate only if there are any changes.
// The function returns the number of stores for which support was withdrawn.
-func (ssfu *supporterStateForUpdate) withdrawSupport(now hlc.ClockTimestamp) (numWithdrawn int) {
+func (ssfu *supporterStateForUpdate) withdrawSupport(
+ ctx context.Context, now hlc.ClockTimestamp,
+) (numWithdrawn int) {
// Assert that there are no updates in ssfu.inProgress.supportFor to make
// sure we can iterate over ssfu.checkedIn.supportFor in the loop below.
assert(
@@ -298,6 +316,7 @@ func (ssfu *supporterStateForUpdate) withdrawSupport(now hlc.ClockTimestamp) (nu
ssNew := maybeWithdrawSupport(ss, now)
if ss != ssNew {
ssfu.inProgress.supportFor[id] = ssNew
+ log.Infof(ctx, "withdrew support for %s", supportChangeStr(ss, ssNew))
meta := ssfu.getMeta()
if meta.MaxWithdrawn.Forward(now) {
ssfu.inProgress.meta = meta
diff --git a/pkg/kv/kvserver/storeliveness/testdata/requester_state b/pkg/kv/kvserver/storeliveness/testdata/requester_state
index 1566681894a6..de6c16360756 100644
--- a/pkg/kv/kvserver/storeliveness/testdata/requester_state
+++ b/pkg/kv/kvserver/storeliveness/testdata/requester_state
@@ -121,6 +121,14 @@ support-from node-id=2 store-id=2
----
epoch: 2, expiration: 410.000000000,0
+handle-messages
+ msg type=MsgHeartbeatResp from-node-id=2 from-store-id=2 epoch=2 expiration=0
+----
+
+support-from node-id=2 store-id=2
+----
+epoch: 2, expiration: 410.000000000,0
+
# -------------------------------------------------------------
# Store (n1, s1) requests support but receives no response.
@@ -220,6 +228,6 @@ heartbeats:
debug-metrics
----
HeartbeatSuccess: 8, HeartbeatFailure: 1
-MessageHandleSuccess: 7, MessageHandleFailure: 0
+MessageHandleSuccess: 8, MessageHandleFailure: 0
SupportWithdrawSuccess: 0, SupportWithdrawFailure: 0
SupportFromStores: 1, SupportForStores: 0
diff --git a/pkg/kv/kvserver/storeliveness/unreliable_store_liveness_handler.go b/pkg/kv/kvserver/storeliveness/unreliable_store_liveness_handler.go
deleted file mode 100644
index daec32add398..000000000000
--- a/pkg/kv/kvserver/storeliveness/unreliable_store_liveness_handler.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2024 The Cockroach Authors.
-//
-// Use of this software is governed by the CockroachDB Software License
-// included in the /LICENSE file.
-
-package storeliveness
-
-import "github.com/cockroachdb/cockroach/pkg/kv/kvserver/storeliveness/storelivenesspb"
-
-type UnreliableHandlerFuncs struct {
- DropStoreLivenessMsg func(*storelivenesspb.Message) bool
-}
-
-// UnreliableHandler allows users to selectively drop StoreLiveness messages.
-type UnreliableHandler struct {
- Name string
- MessageHandler
- UnreliableHandlerFuncs
-}
-
-var _ MessageHandler = &UnreliableHandler{}
-
-// HandleMessage implements the MessageHandler interface.
-func (h *UnreliableHandler) HandleMessage(msg *storelivenesspb.Message) error {
- if h.DropStoreLivenessMsg(msg) {
- return nil
- }
-
- return h.MessageHandler.HandleMessage(msg)
-}
diff --git a/pkg/kv/kvserver/stores.go b/pkg/kv/kvserver/stores.go
index 68f12e98b48d..c204855ddd62 100644
--- a/pkg/kv/kvserver/stores.go
+++ b/pkg/kv/kvserver/stores.go
@@ -204,13 +204,12 @@ func (ls *Stores) SendWithWriteBytes(
// RangeFeed registers a rangefeed over the specified span. It sends
// updates to the provided stream and returns a future with an optional error
// when the rangefeed is complete.
-func (ls *Stores) RangeFeed(
- streamCtx context.Context, args *kvpb.RangeFeedRequest, stream rangefeed.Stream,
-) error {
+func (ls *Stores) RangeFeed(args *kvpb.RangeFeedRequest, stream rangefeed.Stream) error {
+ ctx := stream.Context()
if args.RangeID == 0 {
- log.Fatal(streamCtx, "rangefeed request missing range ID")
+ log.Fatal(ctx, "rangefeed request missing range ID")
} else if args.Replica.StoreID == 0 {
- log.Fatal(streamCtx, "rangefeed request missing store ID")
+ log.Fatal(ctx, "rangefeed request missing store ID")
}
store, err := ls.GetStore(args.Replica.StoreID)
@@ -218,7 +217,7 @@ func (ls *Stores) RangeFeed(
return err
}
- return store.RangeFeed(streamCtx, args, stream)
+ return store.RangeFeed(args, stream)
}
// ReadBootstrapInfo implements the gossip.Storage interface. Read
diff --git a/pkg/kv/kvserver/testing_knobs.go b/pkg/kv/kvserver/testing_knobs.go
index 8cd13c0e2cd4..7e2d38e30dc5 100644
--- a/pkg/kv/kvserver/testing_knobs.go
+++ b/pkg/kv/kvserver/testing_knobs.go
@@ -279,7 +279,7 @@ type StoreTestingKnobs struct {
// RefreshReasonTicksPeriod overrides the default period over which
// pending commands are refreshed. The period is specified as a multiple
// of Raft group ticks.
- RefreshReasonTicksPeriod int64
+ RefreshReasonTicksPeriod int
// DisableProcessRaft disables the process raft loop.
DisableProcessRaft func(roachpb.StoreID) bool
// DisableLastProcessedCheck disables checking on replica queue last processed times.
diff --git a/pkg/raft/raft.go b/pkg/raft/raft.go
index 24ade33584ef..faf6bee0067e 100644
--- a/pkg/raft/raft.go
+++ b/pkg/raft/raft.go
@@ -75,16 +75,16 @@ var ErrProposalDropped = errors.New("raft proposal dropped")
// lockedRand is a small wrapper around rand.Rand to provide
// synchronization among multiple raft groups. Only the methods needed
-// by the code are exposed (e.g. Int63n).
+// by the code are exposed (e.g. Intn).
type lockedRand struct {
mu sync.Mutex
}
-func (r *lockedRand) Int63n(n int64) int64 {
+func (r *lockedRand) Intn(n int) int {
r.mu.Lock()
- v, _ := rand.Int(rand.Reader, big.NewInt(n))
+ v, _ := rand.Int(rand.Reader, big.NewInt(int64(n)))
r.mu.Unlock()
- return v.Int64()
+ return int(v.Int64())
}
var globalRand = &lockedRand{}
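lockedRand.Intn above draws from crypto/rand through math/big rather than from a seeded math/rand generator, so there is no generator state to protect beyond the call itself; note the original swallows the error from rand.Int. A standalone equivalent that surfaces it instead:

package main

import (
	"crypto/rand"
	"fmt"
	"math/big"
)

// intn returns a uniform value in [0, n) using crypto/rand; n must be > 0.
func intn(n int) int {
	v, err := rand.Int(rand.Reader, big.NewInt(int64(n)))
	if err != nil {
		panic(err) // rand.Reader is not expected to fail on supported platforms
	}
	return int(v.Int64())
}

func main() {
	fmt.Println(intn(10)) // some value in [0, 10)
}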
@@ -105,11 +105,11 @@ type Config struct {
// candidate and start an election. ElectionTick must be greater than
// HeartbeatTick. We suggest ElectionTick = 10 * HeartbeatTick to avoid
// unnecessary leader switching.
- ElectionTick int64
+ ElectionTick int
// HeartbeatTick is the number of Node.Tick invocations that must pass between
// heartbeats. That is, a leader sends heartbeat messages to maintain its
// leadership every HeartbeatTick ticks.
- HeartbeatTick int64
+ HeartbeatTick int
// Storage is the storage for raft. raft generates entries and states to be
// stored in storage. raft reads the persisted entries and states out of
@@ -390,14 +390,13 @@ type raft struct {
// term changes.
uncommittedSize entryPayloadSize
- // electionElapsed is tracked by both leaders and followers. For followers, it
- // is the number of ticks since they last received a valid message from the
- // from the current leader, unless the follower is fortifying a leader
- // (leadEpoch != 0), in which case it is always set to 0. For leaders, it is
- // the number of ticks since the last time it performed a checkQuorum.
+	// electionElapsed is the number of ticks since we last reached the
+	// electionTimeout. It is tracked by leaders and followers alike. Followers
+	// additionally reset this field whenever they receive a valid message from
+	// the current leader, or on tick if the leader is currently fortified.
//
// Invariant: electionElapsed = 0 when r.leadEpoch != 0 on a follower.
- electionElapsed int64
+ electionElapsed int
// heartbeatElapsed is the number of ticks since we last reached the
// heartbeatTimeout. Leaders use this field to keep track of when they should
@@ -408,19 +407,19 @@ type raft struct {
// TODO(arul): consider renaming these to "fortifyElapsed" given heartbeats
// are no longer the first class concept they used to be pre-leader
// fortification.
- heartbeatElapsed int64
+ heartbeatElapsed int
maxInflight int
maxInflightBytes uint64
checkQuorum bool
preVote bool
- heartbeatTimeout int64
- electionTimeout int64
+ heartbeatTimeout int
+ electionTimeout int
// randomizedElectionTimeout is a random number between
// [electiontimeout, 2 * electiontimeout - 1]. It gets reset
// when raft changes its state to follower or candidate.
- randomizedElectionTimeout int64
+ randomizedElectionTimeout int
disableProposalForwarding bool
tick func()
@@ -1065,8 +1064,7 @@ func (r *raft) setTerm(term uint64) {
assertTrue(term > r.Term, "term cannot regress")
r.Term = term
r.Vote = None
- r.lead = None
- r.leadEpoch = 0
+ r.resetLead()
}
func (r *raft) setVote(id pb.PeerID) {
@@ -1087,7 +1085,7 @@ func (r *raft) setLead(lead pb.PeerID) {
func (r *raft) resetLead() {
r.lead = None
- r.leadEpoch = 0
+ r.resetLeadEpoch()
}
func (r *raft) setLeadEpoch(leadEpoch pb.Epoch) {
@@ -1175,7 +1173,7 @@ func (r *raft) tickElection() {
// 2. But we do want to take advantage of randomized election timeouts built
// into raft to prevent hung elections.
// We achieve both of these goals by "forwarding" electionElapsed to begin
- // at r.electionTimeout. Also see atRandomizedElectionTimeout.
+ // at r.electionTimeout. Also see pastElectionTimeout.
r.logger.Debugf(
"%d setting election elapsed to start from %d ticks after store liveness support expired",
r.id, r.electionTimeout,
@@ -1185,7 +1183,8 @@ func (r *raft) tickElection() {
r.electionElapsed++
}
- if r.atRandomizedElectionTimeout() {
+ if r.promotable() && r.pastElectionTimeout() {
+ r.electionElapsed = 0
if err := r.Step(pb.Message{From: r.id, Type: pb.MsgHup}); err != nil {
r.logger.Debugf("error occurred during election: %v", err)
}
@@ -1344,7 +1343,7 @@ func (r *raft) hup(t CampaignType) {
return
}
if !r.promotable() {
- r.logger.Infof("%x is unpromotable and can not campaign", r.id)
+ r.logger.Warningf("%x is unpromotable and can not campaign", r.id)
return
}
// NB: The leader is allowed to bump its term by calling an election. Note that
@@ -1537,25 +1536,28 @@ func (r *raft) Step(m pb.Message) error {
default:
r.logger.Infof("%x [term: %d] received a %s message with higher term from %x [term: %d]",
r.id, r.Term, m.Type, m.From, m.Term)
- if IsMsgFromLeader(m.Type) {
- // We've just received a message from a leader which was elected
- // at a higher term. The old leader is no longer fortified, so it's
- // safe to de-fortify at this point.
+ if IsMsgIndicatingLeader(m.Type) {
+ // We've just received a message that indicates that a new leader
+ // was elected at a higher term, but the message may not be from the
+ // leader itself. Either way, the old leader is no longer fortified,
+ // so it's safe to de-fortify at this point.
r.deFortify(m.From, m.Term)
- r.becomeFollower(m.Term, m.From)
+ var lead pb.PeerID
+ if IsMsgFromLeader(m.Type) {
+ lead = m.From
+ }
+ r.becomeFollower(m.Term, lead)
} else {
+ // We've just received a message that does not indicate that a new
+ // leader was elected at a higher term. All it means is that some
+ // other peer has this term.
r.becomeFollower(m.Term, None)
}
}
case m.Term < r.Term:
- ignore := true
-
- switch m.Type {
- case pb.MsgHeartbeat, pb.MsgApp, pb.MsgFortifyLeader:
- if !r.checkQuorum && !r.preVote {
- break
- }
+ if (r.checkQuorum || r.preVote) &&
+ (m.Type == pb.MsgHeartbeat || m.Type == pb.MsgApp || m.Type == pb.MsgFortifyLeader) {
// We have received messages from a leader at a lower term. It is possible
// that these messages were simply delayed in the network, but this could
// also mean that this node has advanced its term number during a network
@@ -1578,9 +1580,7 @@ func (r *raft) Step(m pb.Message) error {
// However, this disruption is inevitable to free this stuck node with
// fresh election. This can be prevented with Pre-Vote phase.
r.send(pb.Message{To: m.From, Type: pb.MsgAppResp})
- return nil
-
- case pb.MsgPreVote:
+ } else if m.Type == pb.MsgPreVote {
// Before Pre-Vote enable, there may have candidate with higher term,
// but less log. After update to Pre-Vote, the cluster may deadlock if
// we drop messages with a lower term.
@@ -1589,31 +1589,12 @@ func (r *raft) Step(m pb.Message) error {
r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] rejected %s from %x [logterm: %d, index: %d] at term %d",
r.id, last.term, last.index, r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term)
r.send(pb.Message{To: m.From, Term: r.Term, Type: pb.MsgPreVoteResp, Reject: true})
- return nil
-
- case pb.MsgSnap:
- // A snapshot message may arrive under an outdated term. Since it carries
- // committed state, we can safely process it regardless of the term. The
- // message term means the snapshot is committed as of this term. By raft
- // invariants, all committed state under a particular term will be
- // committed under later terms as well.
- //
- // TODO(#127348): the MsgSnap handler assumes the message came from this
- // term leader, which is not true if the term is bumped here.
- // TODO(#127349): it is generally not true because the snapshot could have
- // been initiated by a leaseholder (which at the time of writing is not
- // necessarily the leader), and/or delegated via a follower.
- m.Term = r.Term
- ignore = false
- }
-
- // Ignore the message if it has not been handled above and can not be
- // handled below.
- if ignore {
+ } else {
+ // ignore other cases
r.logger.Infof("%x [term: %d] ignored a %s message with lower term from %x [term: %d]",
r.id, r.Term, m.Type, m.From, m.Term)
- return nil
}
+ return nil
}
switch m.Type {
@@ -2643,19 +2624,36 @@ func (r *raft) loadState(state pb.HardState) {
r.raftLog.committed = state.Commit
r.setTerm(state.Term)
r.setVote(state.Vote)
- r.setLead(state.Lead)
- r.setLeadEpoch(state.LeadEpoch)
+ if state.LeadEpoch != 0 {
+ // A non-zero lead epoch indicates that the leader fortified its term.
+ // Fortification promises should hold true across restarts, so we need to
+ // restore both the lead and the lead epoch.
+ //
+	// In cases where the leader wasn't fortified prior to the restart, we
+	// eschew loading the leader known to this peer before shutdown. This
+	// maintains parity with how raft restarts worked before the fortification
+	// protocol was introduced. While it isn't incorrect to load the leader,
+	// doing so trips the inHeartbeatLease condition unless considerable care
+	// is taken, which can delay leader elections by 2s. That delay is a known
+	// source of regressions, which become meaningful when it stacks by an
+	// O(ranges) factor. Epoch-based leases that are quiesced before a restart
+	// are particularly vulnerable. Not loading the leader when it wasn't
+	// fortified avoids this known regression, and possibly other unknown ones.
+ r.setLead(state.Lead)
+ r.setLeadEpoch(state.LeadEpoch)
+ }
}
-// atRandomizedElectionTimeout returns true if r.electionElapsed modulo the
-// r.randomizedElectionTimeout is equal to 0. This means that at every
-// r.randomizedElectionTimeout period, this method will return true once.
-func (r *raft) atRandomizedElectionTimeout() bool {
- return r.electionElapsed != 0 && r.electionElapsed%r.randomizedElectionTimeout == 0
+// pastElectionTimeout returns true if r.electionElapsed is greater
+// than or equal to the randomized election timeout in
+// [electiontimeout, 2 * electiontimeout - 1].
+func (r *raft) pastElectionTimeout() bool {
+ return r.electionElapsed >= r.randomizedElectionTimeout
}
func (r *raft) resetRandomizedElectionTimeout() {
- r.randomizedElectionTimeout = r.electionTimeout + globalRand.Int63n(r.electionTimeout)
+ r.randomizedElectionTimeout = r.electionTimeout + globalRand.Intn(r.electionTimeout)
}
func (r *raft) transferLeader(to pb.PeerID) {
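With electionTimeout = 10, resetRandomizedElectionTimeout draws the timeout uniformly from [10, 19], so pastElectionTimeout fires at a given electionElapsed with probability (number of values t in [10, 19] with t <= elapsed) / 10. That is exactly the expectation column in the TestPastElectionTimeout table in raft_test.go below. A quick check:

package main

import "fmt"

// fireProbability computes P(pastElectionTimeout) at a given electionElapsed,
// with the randomized timeout uniform over [et, 2*et-1].
func fireProbability(et, elapsed int) float64 {
	hits := 0
	for timeout := et; timeout < 2*et; timeout++ {
		if elapsed >= timeout {
			hits++
		}
	}
	return float64(hits) / float64(et)
}

func main() {
	for _, e := range []int{5, 10, 13, 15, 18, 20} {
		fmt.Printf("elapsed=%d p=%.1f\n", e, fireProbability(10, e))
	}
	// elapsed=5 p=0.0, elapsed=10 p=0.1, elapsed=13 p=0.4,
	// elapsed=15 p=0.6, elapsed=18 p=0.9, elapsed=20 p=1.0
}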
diff --git a/pkg/raft/raft_flow_control_test.go b/pkg/raft/raft_flow_control_test.go
index 47eeecbc0f6d..b15163b2105b 100644
--- a/pkg/raft/raft_flow_control_test.go
+++ b/pkg/raft/raft_flow_control_test.go
@@ -21,7 +21,6 @@ import (
"testing"
pb "github.com/cockroachdb/cockroach/pkg/raft/raftpb"
- "github.com/cockroachdb/cockroach/pkg/raft/raftstoreliveness"
"github.com/cockroachdb/cockroach/pkg/testutils"
"github.com/stretchr/testify/require"
)
@@ -113,7 +112,7 @@ func TestMsgAppFlowControl(t *testing.T) {
func(t *testing.T, storeLivenessEnabled bool) {
testOptions := emptyTestConfigModifierOpt()
if !storeLivenessEnabled {
- testOptions = withStoreLiveness(raftstoreliveness.Disabled{})
+ testOptions = withFortificationDisabled()
}
r := newTestRaft(1, 5, 1,
diff --git a/pkg/raft/raft_paper_test.go b/pkg/raft/raft_paper_test.go
index 2fd5a471e304..067eb94fad94 100644
--- a/pkg/raft/raft_paper_test.go
+++ b/pkg/raft/raft_paper_test.go
@@ -37,7 +37,6 @@ import (
"github.com/cockroachdb/cockroach/pkg/raft/raftlogger"
pb "github.com/cockroachdb/cockroach/pkg/raft/raftpb"
- "github.com/cockroachdb/cockroach/pkg/raft/raftstoreliveness"
"github.com/cockroachdb/cockroach/pkg/testutils"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -110,13 +109,13 @@ func TestStartAsFollower(t *testing.T) {
// every heartbeat interval, but it won't send a MsgHeartbeat.
func TestLeaderBcastBeat(t *testing.T) {
// heartbeat interval
- hi := int64(3)
+ hi := 3
testutils.RunTrueAndFalse(t, "store-liveness-enabled",
func(t *testing.T, storeLivenessEnabled bool) {
testOptions := emptyTestConfigModifierOpt()
if !storeLivenessEnabled {
- testOptions = withStoreLiveness(raftstoreliveness.Disabled{})
+ testOptions = withFortificationDisabled()
}
r := newTestRaft(1, 10, hi,
@@ -129,7 +128,7 @@ func TestLeaderBcastBeat(t *testing.T) {
mustAppendEntry(r, pb.Entry{Index: uint64(i) + 1})
}
- for i := int64(0); i < hi; i++ {
+ for i := 0; i < hi; i++ {
require.Empty(t, r.readMessages())
r.tick()
}
@@ -171,7 +170,7 @@ func TestCandidateStartNewElection(t *testing.T) {
// Reference: section 5.2
func testNonleaderStartElection(t *testing.T, state pb.StateType) {
// election timeout
- et := int64(10)
+ et := 10
r := newTestRaft(1, et, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
switch state {
case pb.StateFollower:
@@ -180,7 +179,7 @@ func testNonleaderStartElection(t *testing.T, state pb.StateType) {
r.becomeCandidate()
}
- for i := int64(1); i < 2*et; i++ {
+ for i := 1; i < 2*et; i++ {
r.tick()
}
r.advanceMessagesAfterAppend()
@@ -307,10 +306,10 @@ func TestCandidateElectionTimeoutRandomized(t *testing.T) {
// follower or candidate is randomized.
// Reference: section 5.2
func testNonleaderElectionTimeoutRandomized(t *testing.T, state pb.StateType) {
- et := int64(10)
+ et := 10
r := newTestRaft(1, et, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
- timeouts := make(map[int64]bool)
- for round := int64(0); round < 50*et; round++ {
+ timeouts := make(map[int]bool)
+ for round := 0; round < 50*et; round++ {
switch state {
case pb.StateFollower:
r.becomeFollower(r.Term+1, 2)
@@ -318,7 +317,7 @@ func testNonleaderElectionTimeoutRandomized(t *testing.T, state pb.StateType) {
r.becomeCandidate()
}
- time := int64(0)
+ time := 0
for len(r.readMessages()) == 0 {
r.tick()
time++
@@ -347,7 +346,7 @@ func TestCandidatesElectionTimeoutNonconflict(t *testing.T) {
// likelihood of split vote in the new election.
// Reference: section 5.2
func testNonleadersElectionTimeoutNonconflict(t *testing.T, state pb.StateType) {
- et := int64(10)
+ et := 10
size := 5
rs := make([]*raft, size)
ids := idsBySize(size)
@@ -712,7 +711,7 @@ func TestVoteRequest(t *testing.T) {
})
r.readMessages()
- for i := int64(1); i < r.electionTimeout*2; i++ {
+ for i := 1; i < r.electionTimeout*2; i++ {
r.tickElection()
}
diff --git a/pkg/raft/raft_test.go b/pkg/raft/raft_test.go
index 13f3ce6ae1be..c7461e34496d 100644
--- a/pkg/raft/raft_test.go
+++ b/pkg/raft/raft_test.go
@@ -346,7 +346,7 @@ func TestLearnerElectionTimeout(t *testing.T) {
// n2 is learner. Learner should not start election even when times out.
setRandomizedElectionTimeout(n2, n2.electionTimeout)
- for i := int64(0); i < n2.electionTimeout; i++ {
+ for i := 0; i < n2.electionTimeout; i++ {
n2.tick()
}
@@ -356,10 +356,9 @@ func TestLearnerElectionTimeout(t *testing.T) {
// TestLearnerPromotion verifies that the learner should not election until
// it is promoted to a normal peer.
func TestLearnerPromotion(t *testing.T) {
- n1 := newTestLearnerRaft(1, 10, 1, newTestMemoryStorage(withPeers(1), withLearners(2)),
- withStoreLiveness(raftstoreliveness.Disabled{}))
- n2 := newTestLearnerRaft(2, 10, 1, newTestMemoryStorage(withPeers(1), withLearners(2)),
- withStoreLiveness(raftstoreliveness.Disabled{}))
+	n1 := newTestLearnerRaft(
+		1, 10, 1, newTestMemoryStorage(withPeers(1), withLearners(2)), withFortificationDisabled(),
+	)
+	n2 := newTestLearnerRaft(
+		2, 10, 1, newTestMemoryStorage(withPeers(1), withLearners(2)), withFortificationDisabled(),
+	)
n1.becomeFollower(1, None)
n2.becomeFollower(1, None)
@@ -370,7 +369,7 @@ func TestLearnerPromotion(t *testing.T) {
// n1 should become leader
setRandomizedElectionTimeout(n1, n1.electionTimeout)
- for i := int64(0); i < n1.electionTimeout; i++ {
+ for i := 0; i < n1.electionTimeout; i++ {
n1.tick()
}
n1.advanceMessagesAfterAppend()
@@ -386,7 +385,7 @@ func TestLearnerPromotion(t *testing.T) {
// n2 start election, should become leader
setRandomizedElectionTimeout(n2, n2.electionTimeout)
- for i := int64(0); i < n2.electionTimeout; i++ {
+ for i := 0; i < n2.electionTimeout; i++ {
n2.tick()
}
n2.advanceMessagesAfterAppend()
@@ -636,7 +635,7 @@ func TestLearnerLogReplication(t *testing.T) {
n2.becomeFollower(1, None)
setRandomizedElectionTimeout(n1, n1.electionTimeout)
- for i := int64(0); i < n1.electionTimeout; i++ {
+ for i := 0; i < n1.electionTimeout; i++ {
n1.tick()
}
n1.advanceMessagesAfterAppend()
@@ -676,19 +675,7 @@ func TestSingleNodeCommit(t *testing.T) {
// when leader changes, no new proposal comes in and ChangeTerm proposal is
// filtered.
func TestCannotCommitWithoutNewTermEntry(t *testing.T) {
- testutils.RunTrueAndFalse(t, "store-liveness-enabled",
- func(t *testing.T, storeLivenessEnabled bool) {
- testCannotCommitWithoutNewTermEntry(t, storeLivenessEnabled)
- })
-}
-
-func testCannotCommitWithoutNewTermEntry(t *testing.T, storeLivenessEnabled bool) {
- var cfg func(c *Config) = nil
- if !storeLivenessEnabled {
- cfg = fortificationDisabledConfig
- }
-
- tt := newNetworkWithConfig(cfg, nil, nil, nil, nil, nil)
+ tt := newNetworkWithConfig(fortificationDisabledConfig, nil, nil, nil, nil, nil)
tt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
// 0 cannot reach 2,3,4
@@ -707,19 +694,9 @@ func testCannotCommitWithoutNewTermEntry(t *testing.T, storeLivenessEnabled bool
// avoid committing ChangeTerm proposal
tt.ignore(pb.MsgApp)
- // Elect 2 as the new leader with term 2.
- if storeLivenessEnabled {
- // We need to withdraw support of the current leader.
- tt.livenessFabric.WithdrawSupportForPeerFromAllPeers(1)
- }
-
+ // elect 2 as the new leader with term 2
tt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})
- if storeLivenessEnabled {
- // Restore the support state.
- tt.livenessFabric.GrantSupportForPeerFromAllPeers(1)
- }
-
// no log entries from previous term should be committed
sm = tt.peers[2].(*raft)
assert.Equal(t, uint64(1), sm.raftLog.committed)
@@ -765,9 +742,9 @@ func TestDuelingCandidates(t *testing.T) {
s1 := newTestMemoryStorage(withPeers(1, 2, 3))
s2 := newTestMemoryStorage(withPeers(1, 2, 3))
s3 := newTestMemoryStorage(withPeers(1, 2, 3))
- a := newTestRaft(1, 10, 1, s1, withStoreLiveness(raftstoreliveness.Disabled{}))
- b := newTestRaft(2, 10, 1, s2, withStoreLiveness(raftstoreliveness.Disabled{}))
- c := newTestRaft(3, 10, 1, s3, withStoreLiveness(raftstoreliveness.Disabled{}))
+ a := newTestRaft(1, 10, 1, s1, withFortificationDisabled())
+ b := newTestRaft(2, 10, 1, s2, withFortificationDisabled())
+ c := newTestRaft(3, 10, 1, s3, withFortificationDisabled())
nt := newNetwork(a, b, c)
nt.cut(1, 3)
@@ -810,12 +787,9 @@ func TestDuelingCandidates(t *testing.T) {
}
func TestDuelingPreCandidates(t *testing.T) {
- cfgA := newTestConfig(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
- withStoreLiveness(raftstoreliveness.Disabled{}))
- cfgB := newTestConfig(2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
- withStoreLiveness(raftstoreliveness.Disabled{}))
- cfgC := newTestConfig(3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
- withStoreLiveness(raftstoreliveness.Disabled{}))
+ cfgA := newTestConfig(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)), withFortificationDisabled())
+ cfgB := newTestConfig(2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)), withFortificationDisabled())
+ cfgC := newTestConfig(3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)), withFortificationDisabled())
cfgA.PreVote = true
cfgB.PreVote = true
cfgC.PreVote = true
@@ -1081,51 +1055,27 @@ func TestCommit(t *testing.T) {
}
}
-// TestAtRandomizedElectionTimeout tests that the followers who call
-// atRandomizedElectionTimeout() will campaign uniformly randomly between the
-// range of [electionTimeout, 2 * electionTimeout - 1].
-func TestAtRandomizedElectionTimeout(t *testing.T) {
+func TestPastElectionTimeout(t *testing.T) {
tests := []struct {
- electionElapsed int64
- // wprobability is the expected probability of an election at
- // the given electionElapsed.
+ elapse int
wprobability float64
round bool
}{
- // randomizedElectionTimeout = [10,20).
- // electionElapsed less than the electionTimeout should never campaign.
- {0, 0, false},
{5, 0, false},
- {9, 0, false},
-
- // Since there are 10 possible values for randomizedElectionTimeout, we
- // expect the probability to be 1/10 for each value.
{10, 0.1, true},
- {13, 0.1, true},
- {15, 0.1, true},
- {18, 0.1, true},
- {20, 0.1, true},
-
- // No possible value of randomizedElectionTimeout [10,20) would cause an
- // election at electionElapsed = 21.
- {21, 0, false},
-
- // Only one out of ten values of randomizedElectionTimeout (11) leads to
- // election at electionElapsed = 22.
- {22, 0.1, true},
-
- // Two out of ten values of randomizedElectionTimeout (10, 11) would lead
- // to election at electionElapsed = 120.
- {110, 0.2, true},
+ {13, 0.4, true},
+ {15, 0.6, true},
+ {18, 0.9, true},
+ {20, 1, false},
}
for i, tt := range tests {
sm := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1)))
- sm.electionElapsed = tt.electionElapsed
+ sm.electionElapsed = tt.elapse
c := 0
for j := 0; j < 10000; j++ {
sm.resetRandomizedElectionTimeout()
- if sm.atRandomizedElectionTimeout() {
+ if sm.pastElectionTimeout() {
c++
}
}
@@ -1236,7 +1186,7 @@ func TestHandleHeartbeatRespStoreLivenessDisabled(t *testing.T) {
storage := newTestMemoryStorage(withPeers(1, 2))
require.NoError(t, storage.SetHardState(pb.HardState{Term: 3}))
require.NoError(t, storage.Append(index(1).terms(1, 2, 3)))
- sm := newTestRaft(1, 5, 1, storage, withStoreLiveness(raftstoreliveness.Disabled{}))
+ sm := newTestRaft(1, 5, 1, storage, withFortificationDisabled())
sm.becomeCandidate()
sm.becomeLeader()
sm.raftLog.commitTo(sm.raftLog.unstable.mark())
@@ -1567,12 +1517,15 @@ func TestCandidateResetTermMsgApp(t *testing.T) {
// MsgHeartbeat or MsgApp from leader, "Step" resets the term
// with leader's and reverts back to follower.
func testCandidateResetTerm(t *testing.T, mt pb.MessageType) {
- a := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
- withStoreLiveness(raftstoreliveness.Disabled{}))
- b := newTestRaft(2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
- withStoreLiveness(raftstoreliveness.Disabled{}))
- c := newTestRaft(3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
- withStoreLiveness(raftstoreliveness.Disabled{}))
+ a := newTestRaft(
+ 1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)), withFortificationDisabled(),
+ )
+ b := newTestRaft(
+ 2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)), withFortificationDisabled(),
+ )
+ c := newTestRaft(
+ 3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)), withFortificationDisabled(),
+ )
nt := newNetwork(a, b, c)
@@ -1593,7 +1546,7 @@ func testCandidateResetTerm(t *testing.T, mt pb.MessageType) {
// trigger campaign in isolated c
c.resetRandomizedElectionTimeout()
- for i := int64(0); i < c.randomizedElectionTimeout; i++ {
+ for i := 0; i < c.randomizedElectionTimeout; i++ {
c.tick()
}
c.advanceMessagesAfterAppend()
@@ -1712,7 +1665,7 @@ func TestLeaderStepdownWhenQuorumActive(t *testing.T) {
sm.becomeCandidate()
sm.becomeLeader()
- for i := int64(0); i < sm.electionTimeout+1; i++ {
+ for i := 0; i < sm.electionTimeout+1; i++ {
sm.Step(pb.Message{From: 2, Type: pb.MsgHeartbeatResp, Term: sm.Term})
sm.tick()
}
@@ -1721,15 +1674,14 @@ func TestLeaderStepdownWhenQuorumActive(t *testing.T) {
}
func TestLeaderStepdownWhenQuorumLost(t *testing.T) {
- sm := newTestRaft(1, 5, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
- withStoreLiveness(raftstoreliveness.Disabled{}))
+ sm := newTestRaft(1, 5, 1, newTestMemoryStorage(withPeers(1, 2, 3)), withFortificationDisabled())
sm.checkQuorum = true
sm.becomeCandidate()
sm.becomeLeader()
- for i := int64(0); i < sm.electionTimeout+1; i++ {
+ for i := 0; i < sm.electionTimeout+1; i++ {
sm.tick()
}
@@ -1737,12 +1689,14 @@ func TestLeaderStepdownWhenQuorumLost(t *testing.T) {
}
func TestLeaderSupersedingWithCheckQuorum(t *testing.T) {
- a := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
- withStoreLiveness(raftstoreliveness.Disabled{}))
- b := newTestRaft(2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
- withStoreLiveness(raftstoreliveness.Disabled{}))
- c := newTestRaft(3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
- withStoreLiveness(raftstoreliveness.Disabled{}))
+ a := newTestRaft(
+ 1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)), withFortificationDisabled(),
+ )
+	b := newTestRaft(
+		2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)), withFortificationDisabled(),
+	)
+ c := newTestRaft(
+ 3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)), withFortificationDisabled(),
+ )
a.checkQuorum = true
b.checkQuorum = true
@@ -1751,7 +1705,7 @@ func TestLeaderSupersedingWithCheckQuorum(t *testing.T) {
nt := newNetwork(a, b, c)
setRandomizedElectionTimeout(b, b.electionTimeout+1)
- for i := int64(0); i < b.electionTimeout; i++ {
+ for i := 0; i < b.electionTimeout; i++ {
b.tick()
}
nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
@@ -1765,7 +1719,7 @@ func TestLeaderSupersedingWithCheckQuorum(t *testing.T) {
assert.Equal(t, pb.StateCandidate, c.state)
// Letting b's electionElapsed reach to electionTimeout
- for i := int64(0); i < b.electionTimeout; i++ {
+ for i := 0; i < b.electionTimeout; i++ {
b.tick()
}
nt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})
@@ -1774,12 +1728,15 @@ func TestLeaderSupersedingWithCheckQuorum(t *testing.T) {
}
func TestLeaderElectionWithCheckQuorum(t *testing.T) {
- a := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
- withStoreLiveness(raftstoreliveness.Disabled{}))
- b := newTestRaft(2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
- withStoreLiveness(raftstoreliveness.Disabled{}))
- c := newTestRaft(3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
- withStoreLiveness(raftstoreliveness.Disabled{}))
+ a := newTestRaft(
+ 1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)), withFortificationDisabled(),
+ )
+ b := newTestRaft(
+ 2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)), withFortificationDisabled(),
+ )
+ c := newTestRaft(
+ 3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)), withFortificationDisabled(),
+ )
a.checkQuorum = true
b.checkQuorum = true
@@ -1800,10 +1757,10 @@ func TestLeaderElectionWithCheckQuorum(t *testing.T) {
// because the value might have been reset to electionTimeout since the last state change
setRandomizedElectionTimeout(a, a.electionTimeout+1)
setRandomizedElectionTimeout(b, b.electionTimeout+2)
- for i := int64(0); i < a.electionTimeout; i++ {
+ for i := 0; i < a.electionTimeout; i++ {
a.tick()
}
- for i := int64(0); i < b.electionTimeout; i++ {
+ for i := 0; i < b.electionTimeout; i++ {
b.tick()
}
nt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})
@@ -1816,12 +1773,15 @@ func TestLeaderElectionWithCheckQuorum(t *testing.T) {
// can disrupt the leader even if the leader still "officially" holds the lease. The
// leader is expected to step down and adopt the candidate's term
func TestFreeStuckCandidateWithCheckQuorum(t *testing.T) {
- a := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
- withStoreLiveness(raftstoreliveness.Disabled{}))
- b := newTestRaft(2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
- withStoreLiveness(raftstoreliveness.Disabled{}))
- c := newTestRaft(3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
- withStoreLiveness(raftstoreliveness.Disabled{}))
+ a := newTestRaft(
+ 1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)), withFortificationDisabled(),
+ )
+ b := newTestRaft(
+ 2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)), withFortificationDisabled(),
+ )
+ c := newTestRaft(
+ 3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)), withFortificationDisabled(),
+ )
a.checkQuorum = true
b.checkQuorum = true
@@ -1830,7 +1790,7 @@ func TestFreeStuckCandidateWithCheckQuorum(t *testing.T) {
nt := newNetwork(a, b, c)
setRandomizedElectionTimeout(b, b.electionTimeout+1)
- for i := int64(0); i < b.electionTimeout; i++ {
+ for i := 0; i < b.electionTimeout; i++ {
b.tick()
}
nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
@@ -1876,7 +1836,7 @@ func TestNonPromotableVoterWithCheckQuorum(t *testing.T) {
require.False(t, b.promotable())
- for i := int64(0); i < b.electionTimeout; i++ {
+ for i := 0; i < b.electionTimeout; i++ {
b.tick()
}
nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
@@ -1892,12 +1852,15 @@ func TestNonPromotableVoterWithCheckQuorum(t *testing.T) {
// candidate's response to a late leader heartbeat forces the leader
// to step down.
func TestDisruptiveFollower(t *testing.T) {
- n1 := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
- withStoreLiveness(raftstoreliveness.Disabled{}))
- n2 := newTestRaft(2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
- withStoreLiveness(raftstoreliveness.Disabled{}))
- n3 := newTestRaft(3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
- withStoreLiveness(raftstoreliveness.Disabled{}))
+ n1 := newTestRaft(
+ 1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)), withFortificationDisabled(),
+ )
+ n2 := newTestRaft(
+ 2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)), withFortificationDisabled(),
+ )
+ n3 := newTestRaft(
+ 3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)), withFortificationDisabled(),
+ )
n1.checkQuorum = true
n2.checkQuorum = true
@@ -1921,7 +1884,7 @@ func TestDisruptiveFollower(t *testing.T) {
// election timeouts (e.g. multi-datacenter deploy)
// Or leader messages are being delayed while ticks elapse
setRandomizedElectionTimeout(n3, n3.electionTimeout+2)
- for i := int64(0); i < n3.randomizedElectionTimeout-1; i++ {
+ for i := 0; i < n3.randomizedElectionTimeout-1; i++ {
n3.tick()
}
@@ -1973,12 +1936,9 @@ func TestDisruptiveFollower(t *testing.T) {
// The pre-vote phase then prevents this isolated node from forcing the
// current leader to step down, resulting in fewer disruptions.
func TestDisruptiveFollowerPreVote(t *testing.T) {
- n1 := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
- withStoreLiveness(raftstoreliveness.Disabled{}))
- n2 := newTestRaft(2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
- withStoreLiveness(raftstoreliveness.Disabled{}))
- n3 := newTestRaft(3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
- withStoreLiveness(raftstoreliveness.Disabled{}))
+ n1 := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)), withFortificationDisabled())
+ n2 := newTestRaft(2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)), withFortificationDisabled())
+ n3 := newTestRaft(3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)), withFortificationDisabled())
n1.checkQuorum = true
n2.checkQuorum = true
@@ -2106,7 +2066,7 @@ func TestBcastBeat(t *testing.T) {
testOptions := emptyTestConfigModifierOpt()
if !storeLivenessEnabled {
- testOptions = withStoreLiveness(raftstoreliveness.Disabled{})
+ testOptions = withFortificationDisabled()
}
sm := newTestRaft(1, 10, 1, storage, testOptions)
@@ -2234,7 +2194,7 @@ func TestLeaderIncreaseNext(t *testing.T) {
func TestSendAppendForProgressProbeStoreLivenessDisabled(t *testing.T) {
r := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2)),
- withStoreLiveness(raftstoreliveness.Disabled{}))
+ withFortificationDisabled())
r.becomeCandidate()
r.becomeLeader()
@@ -2269,7 +2229,7 @@ func TestSendAppendForProgressProbeStoreLivenessDisabled(t *testing.T) {
}
// do a heartbeat
- for j := int64(0); j < r.heartbeatTimeout; j++ {
+ for j := 0; j < r.heartbeatTimeout; j++ {
r.tick()
}
assert.True(t, r.trk.Progress(2).MsgAppProbesPaused)
@@ -2327,7 +2287,7 @@ func TestSendAppendForProgressProbeStoreLivenessEnabled(t *testing.T) {
}
// The next heartbeat timeout will allow another message to be sent.
- for j := int64(0); j < r.heartbeatTimeout; j++ {
+ for j := 0; j < r.heartbeatTimeout; j++ {
r.tick()
}
assert.True(t, r.trk.Progress(2).MsgAppProbesPaused)
@@ -2410,7 +2370,7 @@ func TestRestore(t *testing.T) {
assert.Equal(t, s.snap.Metadata.ConfState.Voters, sm.trk.VoterNodes())
require.False(t, sm.restore(s))
- for i := int64(0); i < sm.randomizedElectionTimeout; i++ {
+ for i := 0; i < sm.randomizedElectionTimeout; i++ {
sm.tick()
}
assert.Equal(t, pb.StateFollower, sm.state)
@@ -2472,7 +2432,7 @@ func TestRestoreWithVotersOutgoing(t *testing.T) {
require.False(t, sm.restore(s))
// It should not campaign before actually applying data.
- for i := int64(0); i < sm.randomizedElectionTimeout; i++ {
+ for i := 0; i < sm.randomizedElectionTimeout; i++ {
sm.tick()
}
assert.Equal(t, pb.StateFollower, sm.state)
@@ -2548,7 +2508,7 @@ func TestLearnerReceiveSnapshot(t *testing.T) {
nt := newNetwork(n1, n2)
setRandomizedElectionTimeout(n1, n1.electionTimeout)
- for i := int64(0); i < n1.electionTimeout; i++ {
+ for i := 0; i < n1.electionTimeout; i++ {
n1.tick()
}
@@ -2781,14 +2741,13 @@ func TestAddLearner(t *testing.T) {
// TestAddNodeCheckQuorum tests that addNode does not trigger a leader election
// immediately when checkQuorum is set.
func TestAddNodeCheckQuorum(t *testing.T) {
- r := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1)),
- withStoreLiveness(raftstoreliveness.Disabled{}))
+ r := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1)), withFortificationDisabled())
r.checkQuorum = true
r.becomeCandidate()
r.becomeLeader()
- for i := int64(0); i < r.electionTimeout-1; i++ {
+ for i := 0; i < r.electionTimeout-1; i++ {
r.tick()
}
@@ -2802,7 +2761,7 @@ func TestAddNodeCheckQuorum(t *testing.T) {
// After another electionTimeout ticks without hearing from node 2,
// node 1 should step down.
- for i := int64(0); i < r.electionTimeout; i++ {
+ for i := 0; i < r.electionTimeout; i++ {
r.tick()
}
@@ -3073,7 +3032,7 @@ func TestLeaderTransferLeaderStepsDownImmediately(t *testing.T) {
// Eventually, the previous leader gives up on waiting and calls an election
// to reestablish leadership at the next term.
- for i := int64(0); i < lead.randomizedElectionTimeout; i++ {
+ for i := 0; i < lead.randomizedElectionTimeout; i++ {
lead.tick()
}
nt.send(lead.readMessages()...)
@@ -3086,7 +3045,7 @@ func TestLeaderTransferLeaderStepsDownImmediately(t *testing.T) {
// even the current leader is still under its leader lease.
func TestLeaderTransferWithCheckQuorum(t *testing.T) {
nt := newNetwork(nil, nil, nil)
- for i := int64(1); i < 4; i++ {
+ for i := 1; i < 4; i++ {
r := nt.peers[pb.PeerID(i)].(*raft)
r.checkQuorum = true
setRandomizedElectionTimeout(r, r.electionTimeout+i)
@@ -3094,7 +3053,7 @@ func TestLeaderTransferWithCheckQuorum(t *testing.T) {
// Let peer 2's electionElapsed reach the timeout so that it can vote for peer 1
f := nt.peers[2].(*raft)
- for i := int64(0); i < f.electionTimeout; i++ {
+ for i := 0; i < f.electionTimeout; i++ {
f.tick()
}
@@ -3150,7 +3109,7 @@ func TestLeaderTransferToCandidate(t *testing.T) {
// Isolate node 3 so that it decides to become a pre-candidate.
nt.isolate(3)
- for i := int64(0); i < n3.randomizedElectionTimeout; i++ {
+ for i := 0; i < n3.randomizedElectionTimeout; i++ {
nt.tick(n3)
}
require.Equal(t, pb.StatePreCandidate, n3.state)
@@ -3242,12 +3201,12 @@ func TestLeaderTransferTimeout(t *testing.T) {
nt.send(pb.Message{From: 3, To: 1, Type: pb.MsgTransferLeader})
require.Equal(t, pb.PeerID(3), lead.leadTransferee)
- for i := int64(0); i < lead.heartbeatTimeout; i++ {
+ for i := 0; i < lead.heartbeatTimeout; i++ {
lead.tick()
}
require.Equal(t, pb.PeerID(3), lead.leadTransferee)
- for i := int64(0); i < lead.electionTimeout-lead.heartbeatTimeout; i++ {
+ for i := 0; i < lead.electionTimeout-lead.heartbeatTimeout; i++ {
lead.tick()
}
@@ -3422,13 +3381,13 @@ func TestLeaderTransferSecondTransferToSameNode(t *testing.T) {
nt.send(pb.Message{From: 3, To: 1, Type: pb.MsgTransferLeader})
require.Equal(t, pb.PeerID(3), lead.leadTransferee)
- for i := int64(0); i < lead.heartbeatTimeout; i++ {
+ for i := 0; i < lead.heartbeatTimeout; i++ {
lead.tick()
}
// Second transfer leadership request to the same node.
nt.send(pb.Message{From: 3, To: 1, Type: pb.MsgTransferLeader})
- for i := int64(0); i < lead.electionTimeout-lead.heartbeatTimeout; i++ {
+ for i := 0; i < lead.electionTimeout-lead.heartbeatTimeout; i++ {
lead.tick()
}
@@ -3525,7 +3484,7 @@ func TestLeaderTransferStaleFollower(t *testing.T) {
// Eventually, the previous leader gives up on waiting and calls an election
// to reestablish leadership at the next term. Node 3 does not hear about this
// either.
- for i := int64(0); i < n1.randomizedElectionTimeout; i++ {
+ for i := 0; i < n1.randomizedElectionTimeout; i++ {
n1.tick()
}
nt.send(nt.filter(n1.readMessages())...)
@@ -3563,12 +3522,9 @@ func TestLeaderTransferStaleFollower(t *testing.T) {
// Previously the cluster would come to a standstill when run with PreVote
// enabled.
func TestNodeWithSmallerTermCanCompleteElection(t *testing.T) {
- n1 := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
- withStoreLiveness(raftstoreliveness.Disabled{}))
- n2 := newTestRaft(2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
- withStoreLiveness(raftstoreliveness.Disabled{}))
- n3 := newTestRaft(3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
- withStoreLiveness(raftstoreliveness.Disabled{}))
+ n1 := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)), withFortificationDisabled())
+ n2 := newTestRaft(2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)), withFortificationDisabled())
+ n3 := newTestRaft(3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)), withFortificationDisabled())
n1.becomeFollower(1, None)
n2.becomeFollower(1, None)
@@ -3633,12 +3589,9 @@ func TestNodeWithSmallerTermCanCompleteElection(t *testing.T) {
// TestPreVoteWithSplitVote verifies that after split vote, cluster can complete
// election in next round.
func TestPreVoteWithSplitVote(t *testing.T) {
- n1 := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
- withStoreLiveness(raftstoreliveness.Disabled{}))
- n2 := newTestRaft(2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
- withStoreLiveness(raftstoreliveness.Disabled{}))
- n3 := newTestRaft(3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
- withStoreLiveness(raftstoreliveness.Disabled{}))
+ n1 := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)), withFortificationDisabled())
+ n2 := newTestRaft(2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)), withFortificationDisabled())
+ n3 := newTestRaft(3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)), withFortificationDisabled())
n1.becomeFollower(1, None)
n2.becomeFollower(1, None)
@@ -3759,12 +3712,9 @@ func TestLearnerCampaign(t *testing.T) {
// n2 is follower with term 2
// n3 is partitioned, with term 4 and a shorter log; its state is candidate
func newPreVoteMigrationCluster(t *testing.T) *network {
- n1 := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
- withStoreLiveness(raftstoreliveness.Disabled{}))
- n2 := newTestRaft(2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
- withStoreLiveness(raftstoreliveness.Disabled{}))
- n3 := newTestRaft(3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
- withStoreLiveness(raftstoreliveness.Disabled{}))
+ n1 := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)), withFortificationDisabled())
+ n2 := newTestRaft(2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)), withFortificationDisabled())
+ n3 := newTestRaft(3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)), withFortificationDisabled())
n1.becomeFollower(1, None)
n2.becomeFollower(1, None)
@@ -3894,7 +3844,7 @@ func testConfChangeCheckBeforeCampaign(t *testing.T, v2 bool) {
})
// Trigger campaign in node 2
- for i := int64(0); i < n2.randomizedElectionTimeout; i++ {
+ for i := 0; i < n2.randomizedElectionTimeout; i++ {
n2.tick()
}
// It's still a follower because the committed conf change has not been applied.
@@ -3914,7 +3864,7 @@ func testConfChangeCheckBeforeCampaign(t *testing.T, v2 bool) {
// Advance apply on node 1 and re-establish leadership.
nextEnts(n1, nt.storage[1])
- for i := int64(0); i < n1.randomizedElectionTimeout; i++ {
+ for i := 0; i < n1.randomizedElectionTimeout; i++ {
n1.tick()
}
nt.send(n1.readMessages()...)
@@ -4202,8 +4152,7 @@ type network struct {
// msgHook is called for each message sent. It may inspect the
// message and return true to send it or false to drop it.
- msgHook func(pb.Message) bool
- livenessFabric *raftstoreliveness.LivenessFabric
+ msgHook func(pb.Message) bool
}
// newNetwork initializes a network from peers.
@@ -4222,15 +4171,13 @@ func newNetworkWithConfig(configFunc func(*Config), peers ...stateMachine) *netw
npeers := make(map[pb.PeerID]stateMachine, size)
nstorage := make(map[pb.PeerID]*MemoryStorage, size)
- livenessFabric := raftstoreliveness.NewLivenessFabric()
+
for j, p := range peers {
id := peerAddrs[j]
- livenessFabric.AddPeer(id)
switch v := p.(type) {
case nil:
nstorage[id] = newTestMemoryStorage(withPeers(peerAddrs...))
- cfg := newTestConfig(id, 10, 1, nstorage[id],
- withStoreLiveness(livenessFabric.GetStoreLiveness(id)))
+ cfg := newTestConfig(id, 10, 1, nstorage[id])
if configFunc != nil {
configFunc(cfg)
}
@@ -4266,11 +4213,10 @@ func newNetworkWithConfig(configFunc func(*Config), peers ...stateMachine) *netw
}
}
return &network{
- peers: npeers,
- storage: nstorage,
- dropm: make(map[connem]float64),
- ignorem: make(map[pb.MessageType]bool),
- livenessFabric: livenessFabric,
+ peers: npeers,
+ storage: nstorage,
+ dropm: make(map[connem]float64),
+ ignorem: make(map[pb.MessageType]bool),
}
}
@@ -4387,19 +4333,19 @@ func idsBySize(size int) []pb.PeerID {
// setRandomizedElectionTimeout sets the randomized election timeout directly
// instead of letting the system choose it; some test scenarios need a known
// value to ensure determinism
-func setRandomizedElectionTimeout(r *raft, v int64) {
+func setRandomizedElectionTimeout(r *raft, v int) {
r.randomizedElectionTimeout = v
}
// SetRandomizedElectionTimeout is like setRandomizedElectionTimeout, but
// exported for use by tests that are not in the raft package, using RawNode.
-func SetRandomizedElectionTimeout(r *RawNode, v int64) {
+func SetRandomizedElectionTimeout(r *RawNode, v int) {
setRandomizedElectionTimeout(r.raft, v)
}
// testConfigModifiers allows callers to optionally modify newTestConfig.
type testConfigModifiers struct {
- testingStoreLiveness raftstoreliveness.StoreLiveness
+ testingDisableFortification bool
}
// testConfigModifierOpt is the type of an optional parameter to newTestConfig
@@ -4411,23 +4357,23 @@ func emptyTestConfigModifierOpt() testConfigModifierOpt {
return func(modifier *testConfigModifiers) {}
}
-// withStoreLiveness explicitly uses the supplied StoreLiveness implementation.
-func withStoreLiveness(storeLiveness raftstoreliveness.StoreLiveness) testConfigModifierOpt {
+// withFortificationDisabled disables raft fortification.
+func withFortificationDisabled() testConfigModifierOpt {
return func(modifier *testConfigModifiers) {
- modifier.testingStoreLiveness = storeLiveness
+ modifier.testingDisableFortification = true
}
}
func newTestConfig(
- id pb.PeerID, election, heartbeat int64, storage Storage, opts ...testConfigModifierOpt,
+ id pb.PeerID, election, heartbeat int, storage Storage, opts ...testConfigModifierOpt,
) *Config {
modifiers := testConfigModifiers{}
for _, opt := range opts {
opt(&modifiers)
}
var storeLiveness raftstoreliveness.StoreLiveness
- if modifiers.testingStoreLiveness != nil {
- storeLiveness = modifiers.testingStoreLiveness
+ if modifiers.testingDisableFortification {
+ storeLiveness = raftstoreliveness.Disabled{}
} else {
storeLiveness = raftstoreliveness.AlwaysLive{}
}
@@ -4466,13 +4412,13 @@ func newTestMemoryStorage(opts ...testMemoryStorageOptions) *MemoryStorage {
}
func newTestRaft(
- id pb.PeerID, election, heartbeat int64, storage Storage, opts ...testConfigModifierOpt,
+ id pb.PeerID, election, heartbeat int, storage Storage, opts ...testConfigModifierOpt,
) *raft {
return newRaft(newTestConfig(id, election, heartbeat, storage, opts...))
}
func newTestLearnerRaft(
- id pb.PeerID, election, heartbeat int64, storage Storage, opts ...testConfigModifierOpt,
+ id pb.PeerID, election, heartbeat int, storage Storage, opts ...testConfigModifierOpt,
) *raft {
cfg := newTestConfig(id, election, heartbeat, storage, opts...)
return newRaft(cfg)
@@ -4480,7 +4426,7 @@ func newTestLearnerRaft(
// newTestRawNode sets up a RawNode with the given peers. The configuration will
// not be reflected in the Storage.
-func newTestRawNode(id pb.PeerID, election, heartbeat int64, storage Storage) *RawNode {
+func newTestRawNode(id pb.PeerID, election, heartbeat int, storage Storage) *RawNode {
cfg := newTestConfig(id, election, heartbeat, storage)
rn, err := NewRawNode(cfg)
if err != nil {
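The raft_test.go changes above lean on Go's functional-options pattern: `withFortificationDisabled()` returns a closure that flips a flag on `testConfigModifiers`, and `newTestConfig` folds every option into the modifier struct before building the `Config`. A minimal, self-contained sketch of the same pattern (all names here are illustrative, not the raft package's):

```go
package main

import "fmt"

// modifiers mirrors the shape of testConfigModifiers: a plain struct
// that options mutate before the real configuration is built.
type modifiers struct {
	disableFortification bool
}

// option mirrors testConfigModifierOpt.
type option func(*modifiers)

// withFortificationDisabled flips a single flag, rather than injecting
// a StoreLiveness implementation as the old withStoreLiveness option did.
func withFortificationDisabled() option {
	return func(m *modifiers) { m.disableFortification = true }
}

// newConfig folds the options, as newTestConfig does.
func newConfig(opts ...option) modifiers {
	var m modifiers
	for _, opt := range opts {
		opt(&m)
	}
	return m
}

func main() {
	fmt.Println(newConfig())                            // {false}: fortification enabled
	fmt.Println(newConfig(withFortificationDisabled())) // {true}: fortification disabled
}
```

One upside visible throughout the diff: tests opting out of fortification no longer import `raftstoreliveness` or name a concrete implementation; the choice between `AlwaysLive{}` and `Disabled{}` is made in one place, inside `newTestConfig`.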
diff --git a/pkg/raft/raftstoreliveness/BUILD.bazel b/pkg/raft/raftstoreliveness/BUILD.bazel
index 97150df90f8a..3a1f845c53b6 100644
--- a/pkg/raft/raftstoreliveness/BUILD.bazel
+++ b/pkg/raft/raftstoreliveness/BUILD.bazel
@@ -2,10 +2,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "raftstoreliveness",
- srcs = [
- "mock_store_liveness.go",
- "store_liveness.go",
- ],
+ srcs = ["store_liveness.go"],
importpath = "github.com/cockroachdb/cockroach/pkg/raft/raftstoreliveness",
visibility = ["//visibility:public"],
deps = [
diff --git a/pkg/raft/raftstoreliveness/mock_store_liveness.go b/pkg/raft/raftstoreliveness/mock_store_liveness.go
deleted file mode 100644
index 5ab14262c407..000000000000
--- a/pkg/raft/raftstoreliveness/mock_store_liveness.go
+++ /dev/null
@@ -1,318 +0,0 @@
-// Copyright 2024 The Cockroach Authors.
-//
-// Use of this software is governed by the CockroachDB Software License
-// included in the /LICENSE file.
-
-package raftstoreliveness
-
-import (
- pb "github.com/cockroachdb/cockroach/pkg/raft/raftpb"
- "github.com/cockroachdb/cockroach/pkg/util/hlc"
-)
-
-// livenessEntry is an entry in the MockStoreLiveness state.
-type livenessEntry struct {
- // isSupporting controls whether supportFor returns true or false.
- isSupporting bool
- supportForEpoch pb.Epoch
-
- // isSupported controls whether supportFrom returns true or false.
- isSupported bool
- supportFromEpoch pb.Epoch
-}
-
-// initLivenessEntry is the initial state entry placed in MockStoreLiveness.
-var initLivenessEntry = livenessEntry{
- // Initially, the peer is giving support to all other peers.
- isSupporting: true,
- supportForEpoch: 1,
-
- // Initially, the peer is receiving support from all other peers.
- isSupported: true,
- supportFromEpoch: 1,
-}
-
-// MockStoreLiveness is a mock implementation of StoreLiveness. It initially
-// treats all store to store connections as live, but it can be configured to
-// withdraw support, grant support, and bump the supported epoch to/from any two
-// peers.
-//
-// Each peer's state can be altered independently This makes it possible to
-// construct a uni-directional partition.
-type MockStoreLiveness struct {
- id pb.PeerID
-
- // state is a map, where state[i] represents the liveness entry for peer i.
- state map[pb.PeerID]livenessEntry
-
- // supportExpired controls whether this peer considers the leader support
- // expired or not.
- supportExpired bool
-}
-
-var _ StoreLiveness = &MockStoreLiveness{}
-
-func NewMockStoreLiveness(id pb.PeerID) *MockStoreLiveness {
- return &MockStoreLiveness{
- id: id,
- state: make(map[pb.PeerID]livenessEntry),
- supportExpired: false,
- }
-}
-
-// createStoreLivenessEntry creates a new state entry for the given peer.
-func (m *MockStoreLiveness) createStoreLivenessEntry(id pb.PeerID) {
- if _, exists := m.state[id]; exists {
- panic("attempting to create a store liveness entry that already exists")
- }
-
- m.state[id] = initLivenessEntry
-}
-
-// SupportFor implements the StoreLiveness interface.
-func (m *MockStoreLiveness) SupportFor(id pb.PeerID) (pb.Epoch, bool) {
- entry, exists := m.state[id]
- if !exists {
- panic("attempting to call SupportFor() for a non-existing entry")
- }
- return entry.supportForEpoch, entry.isSupporting
-}
-
-// SupportFrom implements the StoreLiveness interface.
-func (m *MockStoreLiveness) SupportFrom(id pb.PeerID) (pb.Epoch, hlc.Timestamp) {
- entry, exists := m.state[id]
- if !exists {
- panic("attempting to call SupportFrom() for a non-existing entry")
- }
-
- if entry.isSupported {
- return entry.supportFromEpoch, hlc.MaxTimestamp
- }
- return 0, hlc.Timestamp{}
-}
-
-// SupportFromEnabled implements the StoreLiveness interface.
-func (*MockStoreLiveness) SupportFromEnabled() bool {
- return true
-}
-
-// SupportExpired implements the StoreLiveness interface.
-func (m *MockStoreLiveness) SupportExpired(ts hlc.Timestamp) bool {
- if m.supportExpired {
- return true
- }
-
- // If not configured explicitly, infer from the supplied timestamp.
- switch ts {
- case hlc.Timestamp{}:
- return true
- case hlc.MaxTimestamp:
- return false
- default:
- panic("unexpected timestamp")
- }
-}
-
-// bumpSupportForEpoch bumps the supportFor epoch for the given peer.
-func (m *MockStoreLiveness) bumpSupportForEpoch(id pb.PeerID) {
- entry, exists := m.state[id]
- if !exists {
- panic("attempting to call bumpSupportForEpoch() for a non-existing entry")
- }
-
- entry.supportForEpoch++
- m.state[id] = entry
-}
-
-// bumpSupportFromEpoch bumps the supportFrom epoch for the given peer.
-func (m *MockStoreLiveness) bumpSupportFromEpoch(id pb.PeerID) {
- entry, exists := m.state[id]
- if !exists {
- panic("attempting to call bumpSupportFromEpoch() for a non-existing entry")
- }
-
- entry.supportFromEpoch++
- m.state[id] = entry
-}
-
-// grantSupportFor grants support for the given peer.
-func (m *MockStoreLiveness) grantSupportFor(id pb.PeerID) {
- entry, exists := m.state[id]
- if !exists {
- panic("attempting to call grantSupportFor() for a non-existing entry")
- }
-
- entry.isSupporting = true
- m.state[id] = entry
-}
-
-// grantSupportFrom grants support from the given peer.
-func (m *MockStoreLiveness) grantSupportFrom(id pb.PeerID) {
- entry, exists := m.state[id]
- if !exists {
- panic("attempting to call grantSupportFrom() for a non-existing entry")
- }
-
- entry.isSupported = true
- m.state[id] = entry
-}
-
-// withdrawSupportFor withdraws support for the given peer.
-func (m *MockStoreLiveness) withdrawSupportFor(id pb.PeerID) {
- entry, exists := m.state[id]
- if !exists {
- panic("attempting to call withdrawSupportFor() for a non-existing entry")
- }
-
- entry.isSupporting = false
- m.state[id] = entry
-}
-
-// withdrawSupportFrom withdraws support from the given peer.
-func (m *MockStoreLiveness) withdrawSupportFrom(id pb.PeerID) {
- entry, exists := m.state[id]
- if !exists {
- panic("attempting to call withdrawSupportFrom() for a non-existing entry")
- }
-
- entry.isSupported = false
- m.state[id] = entry
-}
-
-// setSupportExpired explicitly controls what SupportExpired returns regardless
-// of the timestamp.
-func (m *MockStoreLiveness) setSupportExpired(expired bool) {
- m.supportExpired = expired
-}
-
-// LivenessFabric is a global view of the store liveness state.
-type LivenessFabric struct {
- // state is an array, where state[i] represents the MockStoreLiveness entry
- // for peer i.
- state map[pb.PeerID]*MockStoreLiveness
-}
-
-// NewLivenessFabric initializes and returns a LivenessFabric.
-func NewLivenessFabric() *LivenessFabric {
- state := make(map[pb.PeerID]*MockStoreLiveness)
- return &LivenessFabric{
- state: state,
- }
-}
-
-// AddPeer adds a peer to the liveness fabric.
-func (l *LivenessFabric) AddPeer(id pb.PeerID) {
- if _, exists := l.state[id]; exists {
- panic("attempting to add a peer that already exists")
- }
-
- l.state[id] = NewMockStoreLiveness(id)
- l.state[id].createStoreLivenessEntry(id)
-
- // Iterate over all liveness stores in the fabric, and add the new peer to
- // their state.
- for _, storeLiveness := range l.state {
- // We added our self above.
- if storeLiveness.id == id {
- continue
- }
-
- storeLiveness.createStoreLivenessEntry(id)
- l.state[id].createStoreLivenessEntry(storeLiveness.id)
- }
-}
-
-// GetStoreLiveness return the MockStoreLiveness for the given peer.
-func (l *LivenessFabric) GetStoreLiveness(id pb.PeerID) *MockStoreLiveness {
- entry, exists := l.state[id]
- if !exists {
- panic("attempting to call GetStoreLiveness() for a non-existing id")
- }
- return entry
-}
-
-// BumpEpoch bumps the epoch supported by fromID for forID and starts supporting
-// the new epoch. We also update state on forID to reflect support at this new
-// epoch.
-func (l *LivenessFabric) BumpEpoch(fromID pb.PeerID, forID pb.PeerID) {
- fromEntry, exists := l.state[fromID]
- if !exists {
- panic("attempting to call BumpEpoch() for a non-existing fromID entry")
- }
- fromEntry.bumpSupportForEpoch(forID)
- fromEntry.grantSupportFor(forID)
-
- forEntry, exists := l.state[forID]
- if !exists {
- panic("attempting to call BumpEpoch() for a non-existing forID entry")
- }
- forEntry.bumpSupportFromEpoch(fromID)
- forEntry.grantSupportFrom(fromID)
-}
-
-// WithdrawSupport withdraws the support by fromID for forID. We also update
-// state on forID to reflect the withdrawal of support.
-func (l *LivenessFabric) WithdrawSupport(fromID pb.PeerID, forID pb.PeerID) {
- fromEntry, exists := l.state[fromID]
- if !exists {
- panic("attempting to call WithdrawSupport() for a non-existing fromID entry")
- }
- fromEntry.withdrawSupportFor(forID)
-
- forEntry, exists := l.state[forID]
- if !exists {
- panic("attempting to call WithdrawSupport() for a non-existing forID entry")
- }
- forEntry.withdrawSupportFrom(fromID)
-}
-
-// GrantSupport grants the support by fromID for forID. We also update state on
-// forID to reflect the withdrawal of support.
-func (l *LivenessFabric) GrantSupport(fromID pb.PeerID, forID pb.PeerID) {
- fromEntry, exists := l.state[fromID]
- if !exists {
- panic("attempting to call GrantSupport() for a non-existing fromID entry")
- }
- fromEntry.grantSupportFor(forID)
-
- forEntry, exists := l.state[forID]
- if !exists {
- panic("attempting to call GrantSupport() for a non-existing forID entry")
- }
- forEntry.grantSupportFrom(fromID)
-}
-
-// SetSupportExpired explicitly controls what SupportExpired returns regardless
-// of the timestamp supplied to it.
-func (l *LivenessFabric) SetSupportExpired(peer pb.PeerID, expired bool) {
- entry, exists := l.state[peer]
- if !exists {
- panic("attempting to call SetSupportExpired() for a non-existing peer entry")
- }
- entry.setSupportExpired(expired)
-}
-
-// WithdrawSupportForPeerFromAllPeers withdraws support for the target peer from
-// all peers in the liveness fabric.
-func (l *LivenessFabric) WithdrawSupportForPeerFromAllPeers(targetID pb.PeerID) {
- for curID := range l.state {
- l.WithdrawSupport(curID, targetID)
- }
-}
-
-// GrantSupportForPeerFromAllPeers grants support for the target peer from
-// // all peers in the liveness fabric.
-func (l *LivenessFabric) GrantSupportForPeerFromAllPeers(targetID pb.PeerID) {
- for curID := range l.state {
- l.GrantSupport(curID, targetID)
- }
-}
-
-// BumpAllSupportEpochs bumps all the support epochs in the liveness fabric.
-func (l *LivenessFabric) BumpAllSupportEpochs() {
- for s1ID, storeLiveness := range l.state {
- for s2ID := range storeLiveness.state {
- l.BumpEpoch(s1ID, s2ID)
- }
- }
-}
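The deleted `MockStoreLiveness`/`LivenessFabric` pair amounted to a mutable support matrix: every peer starts out supporting every other peer at epoch 1, and tests could withdraw or grant support in a single direction to build asymmetric partitions. A condensed sketch of that idea, assuming nothing beyond what the deleted file shows:

```go
package main

import "fmt"

type peerID uint64

// fabric is a toy version of the deleted LivenessFabric: a matrix
// recording whether store i currently supports store j.
type fabric struct {
	support map[peerID]map[peerID]bool
}

func newFabric(peers ...peerID) *fabric {
	f := &fabric{support: map[peerID]map[peerID]bool{}}
	for _, i := range peers {
		f.support[i] = map[peerID]bool{}
		for _, j := range peers {
			f.support[i][j] = true // initially, everyone supports everyone
		}
	}
	return f
}

// withdraw mirrors LivenessFabric.WithdrawSupport: fromID stops
// supporting forID, but forID's support for fromID is untouched.
func (f *fabric) withdraw(fromID, forID peerID) {
	f.support[fromID][forID] = false
}

func (f *fabric) supportFor(fromID, forID peerID) bool {
	return f.support[fromID][forID]
}

func main() {
	f := newFabric(1, 2, 3)
	f.withdraw(2, 1)
	fmt.Println(f.supportFor(2, 1)) // false: a one-directional partition
	fmt.Println(f.supportFor(1, 2)) // true
}
```

With the fabric removed from this package, tests choose between the two stock implementations instead: `raftstoreliveness.AlwaysLive{}` by default, or `raftstoreliveness.Disabled{}` via `withFortificationDisabled()`.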
diff --git a/pkg/raft/rafttest/interaction_env.go b/pkg/raft/rafttest/interaction_env.go
index 73fc7213e509..616f1823a72a 100644
--- a/pkg/raft/rafttest/interaction_env.go
+++ b/pkg/raft/rafttest/interaction_env.go
@@ -33,7 +33,7 @@ type InteractionOpts struct {
// SetRandomizedElectionTimeout is used to plumb this function down from the
// raft test package.
- SetRandomizedElectionTimeout func(node *raft.RawNode, timeout int64)
+ SetRandomizedElectionTimeout func(node *raft.RawNode, timeout int)
}
// Node is a member of a raft group tested via an InteractionEnv.
diff --git a/pkg/raft/rafttest/interaction_env_handler_set_randomized_election_timeout.go b/pkg/raft/rafttest/interaction_env_handler_set_randomized_election_timeout.go
index bd4bb676fd7e..36f0cd2d8d9b 100644
--- a/pkg/raft/rafttest/interaction_env_handler_set_randomized_election_timeout.go
+++ b/pkg/raft/rafttest/interaction_env_handler_set_randomized_election_timeout.go
@@ -1,6 +1,3 @@
-// This code has been modified from its original form by The Cockroach Authors.
-// All modifications are Copyright 2024 The Cockroach Authors.
-//
// Copyright 2023 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
@@ -28,7 +25,7 @@ func (env *InteractionEnv) handleSetRandomizedElectionTimeout(
t *testing.T, d datadriven.TestData,
) error {
idx := firstAsNodeIdx(t, d)
- var timeout int64
+ var timeout int
d.ScanArgs(t, "timeout", &timeout)
require.NotZero(t, timeout)
diff --git a/pkg/raft/rafttest/interaction_env_handler_tick.go b/pkg/raft/rafttest/interaction_env_handler_tick.go
index 020424abd147..10c9ba2130ef 100644
--- a/pkg/raft/rafttest/interaction_env_handler_tick.go
+++ b/pkg/raft/rafttest/interaction_env_handler_tick.go
@@ -1,6 +1,3 @@
-// This code has been modified from its original form by The Cockroach Authors.
-// All modifications are Copyright 2024 The Cockroach Authors.
-//
// Copyright 2019 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
@@ -34,8 +31,8 @@ func (env *InteractionEnv) handleTickHeartbeat(t *testing.T, d datadriven.TestDa
}
// Tick the node at the given index the given number of times.
-func (env *InteractionEnv) Tick(idx int, num int64) error {
- for i := int64(0); i < num; i++ {
+func (env *InteractionEnv) Tick(idx int, num int) error {
+ for i := 0; i < num; i++ {
env.Nodes[idx].Tick()
}
return nil
diff --git a/pkg/raft/rawnode_test.go b/pkg/raft/rawnode_test.go
index 9862bb483054..f6be2cd5f0b0 100644
--- a/pkg/raft/rawnode_test.go
+++ b/pkg/raft/rawnode_test.go
@@ -25,7 +25,6 @@ import (
"github.com/cockroachdb/cockroach/pkg/raft/quorum"
"github.com/cockroachdb/cockroach/pkg/raft/raftlogger"
pb "github.com/cockroachdb/cockroach/pkg/raft/raftpb"
- "github.com/cockroachdb/cockroach/pkg/raft/raftstoreliveness"
"github.com/cockroachdb/cockroach/pkg/raft/tracker"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -308,8 +307,7 @@ func TestRawNodeJointAutoLeave(t *testing.T) {
exp2Cs := pb.ConfState{Voters: []pb.PeerID{1}, Learners: []pb.PeerID{2}}
s := newTestMemoryStorage(withPeers(1))
- rawNode, err := NewRawNode(newTestConfig(1, 10, 1, s,
- withStoreLiveness(raftstoreliveness.Disabled{})))
+ rawNode, err := NewRawNode(newTestConfig(1, 10, 1, s, withFortificationDisabled()))
require.NoError(t, err)
rawNode.Campaign()
@@ -585,7 +583,7 @@ func TestRawNodeRestart(t *testing.T) {
assert.Equal(t, pb.Epoch(1), rawNode.raft.leadEpoch)
// Ensure we campaign after the election timeout has elapsed.
- for i := int64(0); i < rawNode.raft.randomizedElectionTimeout; i++ {
+ for i := 0; i < rawNode.raft.randomizedElectionTimeout; i++ {
// TODO(arul): consider getting rid of this hack to reset the epoch so that
// we can call an election without panicking.
rawNode.raft.leadEpoch = 0
diff --git a/pkg/raft/testdata/confchange_v1_remove_leader.txt b/pkg/raft/testdata/confchange_v1_remove_leader.txt
index a550efda9ce1..ed9a99d481c2 100644
--- a/pkg/raft/testdata/confchange_v1_remove_leader.txt
+++ b/pkg/raft/testdata/confchange_v1_remove_leader.txt
@@ -212,4 +212,4 @@ raft proposal dropped
# Nor can it campaign to become leader.
campaign 1
----
-INFO 1 is unpromotable and can not campaign
+WARN 1 is unpromotable and can not campaign
diff --git a/pkg/raft/testdata/confchange_v2_replace_leader.txt b/pkg/raft/testdata/confchange_v2_replace_leader.txt
index 342a01ed6fd8..f19700e5f589 100644
--- a/pkg/raft/testdata/confchange_v2_replace_leader.txt
+++ b/pkg/raft/testdata/confchange_v2_replace_leader.txt
@@ -170,7 +170,7 @@ raft-state
# Make sure n1 cannot campaign to become leader.
campaign 1
----
-INFO 1 is unpromotable and can not campaign
+WARN 1 is unpromotable and can not campaign
support-expired 1
----
diff --git a/pkg/raft/testdata/snapshot_new_term.txt b/pkg/raft/testdata/snapshot_new_term.txt
new file mode 100644
index 000000000000..e2dac737c2e9
--- /dev/null
+++ b/pkg/raft/testdata/snapshot_new_term.txt
@@ -0,0 +1,155 @@
+# Test that creates a scenario where a peer learns about a new leadership term
+# via a snapshot.
+
+log-level none
+----
+ok
+
+add-nodes 3 voters=(1,2,3) index=10
+----
+ok
+
+# Elect 1 as leader.
+campaign 1
+----
+ok
+
+stabilize
+----
+ok
+
+log-level debug
+----
+ok
+
+raft-state
+----
+1: StateLeader (Voter) Term:1 Lead:1 LeadEpoch:1
+2: StateFollower (Voter) Term:1 Lead:1 LeadEpoch:1
+3: StateFollower (Voter) Term:1 Lead:1 LeadEpoch:1
+
+# Transfer leadership to 2, without 3 hearing about it.
+transfer-leadership from=1 to=2
+----
+INFO 1 [term 1] starts to transfer leadership to 2
+INFO 1 sends MsgTimeoutNow to 2 immediately as 2 already has up-to-date log
+INFO 1 became follower at term 1
+
+stabilize 1 2
+----
+> 1 handling Ready
+ Ready MustSync=false:
+ State:StateFollower
+ Messages:
+ 1->2 MsgTimeoutNow Term:1 Log:0/0
+> 2 receiving messages
+ 1->2 MsgTimeoutNow Term:1 Log:0/0
+ INFO 2 [term 1] received MsgTimeoutNow from 1 and starts an election to get leadership
+ INFO 2 is starting a new election at term 1
+ INFO 2 became candidate at term 2
+ INFO 2 [logterm: 1, index: 11] sent MsgVote request to 1 at term 2
+ INFO 2 [logterm: 1, index: 11] sent MsgVote request to 3 at term 2
+> 2 handling Ready
+ Ready MustSync=true:
+ State:StateCandidate
+ HardState Term:2 Vote:2 Commit:11 Lead:0 LeadEpoch:0
+ Messages:
+ 2->1 MsgVote Term:2 Log:1/11
+ 2->3 MsgVote Term:2 Log:1/11
+ INFO 2 received MsgVoteResp from 2 at term 2
+ INFO 2 has received 1 MsgVoteResp votes and 0 vote rejections
+> 1 receiving messages
+ 2->1 MsgVote Term:2 Log:1/11
+ INFO 1 [term: 1] received a MsgVote message with higher term from 2 [term: 2]
+ INFO 1 became follower at term 2
+ INFO 1 [logterm: 1, index: 11, vote: 0] cast MsgVote for 2 [logterm: 1, index: 11] at term 2
+> 1 handling Ready
+ Ready MustSync=true:
+ HardState Term:2 Vote:2 Commit:11 Lead:0 LeadEpoch:0
+ Messages:
+ 1->2 MsgVoteResp Term:2 Log:0/0
+> 2 receiving messages
+ 1->2 MsgVoteResp Term:2 Log:0/0
+ INFO 2 received MsgVoteResp from 1 at term 2
+ INFO 2 has received 2 MsgVoteResp votes and 0 vote rejections
+ INFO 2 became leader at term 2
+> 2 handling Ready
+ Ready MustSync=true:
+ State:StateLeader
+ HardState Term:2 Vote:2 Commit:11 Lead:2 LeadEpoch:1
+ Entries:
+ 2/12 EntryNormal ""
+ Messages:
+ 2->1 MsgFortifyLeader Term:2 Log:0/0
+ 2->3 MsgFortifyLeader Term:2 Log:0/0
+ 2->1 MsgApp Term:2 Log:1/11 Commit:11 Entries:[2/12 EntryNormal ""]
+ 2->3 MsgApp Term:2 Log:1/11 Commit:11 Entries:[2/12 EntryNormal ""]
+> 1 receiving messages
+ 2->1 MsgFortifyLeader Term:2 Log:0/0
+ 2->1 MsgApp Term:2 Log:1/11 Commit:11 Entries:[2/12 EntryNormal ""]
+> 1 handling Ready
+ Ready MustSync=true:
+ HardState Term:2 Vote:2 Commit:11 Lead:2 LeadEpoch:1
+ Entries:
+ 2/12 EntryNormal ""
+ Messages:
+ 1->2 MsgFortifyLeaderResp Term:2 Log:0/0 LeadEpoch:1
+ 1->2 MsgAppResp Term:2 Log:0/12 Commit:11
+> 2 receiving messages
+ 1->2 MsgFortifyLeaderResp Term:2 Log:0/0 LeadEpoch:1
+ 1->2 MsgAppResp Term:2 Log:0/12 Commit:11
+> 2 handling Ready
+ Ready MustSync=true:
+ HardState Term:2 Vote:2 Commit:12 Lead:2 LeadEpoch:1
+ CommittedEntries:
+ 2/12 EntryNormal ""
+ Messages:
+ 2->1 MsgApp Term:2 Log:1/11 Commit:11 Entries:[2/12 EntryNormal ""]
+ 2->1 MsgApp Term:2 Log:2/12 Commit:12
+> 1 receiving messages
+ 2->1 MsgApp Term:2 Log:1/11 Commit:11 Entries:[2/12 EntryNormal ""]
+ 2->1 MsgApp Term:2 Log:2/12 Commit:12
+> 1 handling Ready
+ Ready MustSync=true:
+ HardState Term:2 Vote:2 Commit:12 Lead:2 LeadEpoch:1
+ CommittedEntries:
+ 2/12 EntryNormal ""
+ Messages:
+ 1->2 MsgAppResp Term:2 Log:0/12 Commit:11
+ 1->2 MsgAppResp Term:2 Log:0/12 Commit:12
+> 2 receiving messages
+ 1->2 MsgAppResp Term:2 Log:0/12 Commit:11
+ 1->2 MsgAppResp Term:2 Log:0/12 Commit:12
+
+# Drop inflight messages to 3.
+deliver-msgs drop=(3)
+----
+dropped: 2->3 MsgVote Term:2 Log:1/11
+dropped: 2->3 MsgFortifyLeader Term:2 Log:0/0
+dropped: 2->3 MsgApp Term:2 Log:1/11 Commit:11 Entries:[2/12 EntryNormal ""]
+
+# Send a manual snapshot from 2 to 3, which will be at term 2.
+send-snapshot 2 3
+----
+2->3 MsgSnap Term:2 Log:0/0
+ Snapshot: Index:12 Term:2 ConfState:Voters:[1 2 3] VotersOutgoing:[] Learners:[] LearnersNext:[] AutoLeave:false
+
+stabilize
+----
+> 3 receiving messages
+ 2->3 MsgSnap Term:2 Log:0/0
+ Snapshot: Index:12 Term:2 ConfState:Voters:[1 2 3] VotersOutgoing:[] Learners:[] LearnersNext:[] AutoLeave:false
+ INFO 3 [term: 1] received a MsgSnap message with higher term from 2 [term: 2]
+ INFO 3 became follower at term 2
+ INFO log [committed=11, applied=11, applying=11, unstable.offset=12, unstable.offsetInProgress=12, len(unstable.Entries)=0] starts to restore snapshot [index: 12, term: 2]
+ INFO 3 switched to configuration voters=(1 2 3)
+ INFO 3 [commit: 12, lastindex: 12, lastterm: 2] restored snapshot [index: 12, term: 2]
+ INFO 3 [commit: 12] restored snapshot [index: 12, term: 2]
+> 3 handling Ready
+ Ready MustSync=true:
+ HardState Term:2 Commit:12 Lead:0 LeadEpoch:0
+ Snapshot Index:12 Term:2 ConfState:Voters:[1 2 3] VotersOutgoing:[] Learners:[] LearnersNext:[] AutoLeave:false
+ Messages:
+ 3->2 MsgAppResp Term:2 Log:0/12 Commit:12
+> 2 receiving messages
+ 3->2 MsgAppResp Term:2 Log:0/12 Commit:12
diff --git a/pkg/raft/util.go b/pkg/raft/util.go
index b79ad287f7db..f672fb32391d 100644
--- a/pkg/raft/util.go
+++ b/pkg/raft/util.go
@@ -49,6 +49,8 @@ var isResponseMsg = [...]bool{
pb.MsgFortifyLeaderResp: true,
}
+// isMsgFromLeader contains message types that come from the leader of the
+// message's term.
var isMsgFromLeader = [...]bool{
pb.MsgApp: true,
// TODO(nvanbenschoten): we can't consider MsgSnap to be from the leader of
@@ -60,6 +62,20 @@ var isMsgFromLeader = [...]bool{
pb.MsgDeFortifyLeader: true,
}
+// isMsgIndicatingLeader contains message types that indicate that there is a
+// leader at the message's term, even if the message is not from the leader
+// itself.
+//
+// TODO(nvanbenschoten): remove this when we address the TODO above.
+var isMsgIndicatingLeader = [...]bool{
+ pb.MsgApp: true,
+ pb.MsgSnap: true,
+ pb.MsgHeartbeat: true,
+ pb.MsgTimeoutNow: true,
+ pb.MsgFortifyLeader: true,
+ pb.MsgDeFortifyLeader: true,
+}
+
func isMsgInArray(msgt pb.MessageType, arr []bool) bool {
i := int(msgt)
return i < len(arr) && arr[i]
@@ -77,6 +93,10 @@ func IsMsgFromLeader(msgt pb.MessageType) bool {
return isMsgInArray(msgt, isMsgFromLeader[:])
}
+func IsMsgIndicatingLeader(msgt pb.MessageType) bool {
+ return isMsgInArray(msgt, isMsgIndicatingLeader[:])
+}
+
func IsLocalMsgTarget(id pb.PeerID) bool {
return id == LocalAppendThread || id == LocalApplyThread
}
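`IsMsgIndicatingLeader` is deliberately a superset of `IsMsgFromLeader`: per the TODO, `MsgSnap` currently indicates that a leader exists at the message's term without being classified as sent by that leader. A hedged sketch of an external test pinning down that relationship (the package path and test name are assumptions):

```go
package raft_test

import (
	"testing"

	"github.com/cockroachdb/cockroach/pkg/raft"
	pb "github.com/cockroachdb/cockroach/pkg/raft/raftpb"
)

// TestIndicatingLeaderSuperset checks that from-leader implies
// indicating-leader, and that MsgSnap is the one type that indicates
// a leader without being from the leader.
func TestIndicatingLeaderSuperset(t *testing.T) {
	for _, typ := range []pb.MessageType{pb.MsgApp, pb.MsgHeartbeat, pb.MsgSnap} {
		if raft.IsMsgFromLeader(typ) && !raft.IsMsgIndicatingLeader(typ) {
			t.Errorf("%v: IsMsgFromLeader should imply IsMsgIndicatingLeader", typ)
		}
	}
	if !raft.IsMsgIndicatingLeader(pb.MsgSnap) || raft.IsMsgFromLeader(pb.MsgSnap) {
		t.Error("MsgSnap should indicate a leader without being from the leader")
	}
}
```

The util_test.go changes below assert the same implication inline in `TestMsgFromLeader`, so the invariant is covered without a separate file.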
diff --git a/pkg/raft/util_test.go b/pkg/raft/util_test.go
index 2dba8990920d..1f0a84553c07 100644
--- a/pkg/raft/util_test.go
+++ b/pkg/raft/util_test.go
@@ -186,6 +186,50 @@ func TestMsgFromLeader(t *testing.T) {
if got != tt.isMsgFromLeader {
t.Errorf("#%d: got %v, want %v", i, got, tt.isMsgFromLeader)
}
+ if got {
+ require.True(t, IsMsgIndicatingLeader(tt.msgt),
+ "IsMsgFromLeader should imply IsMsgIndicatingLeader")
+ }
+ }
+}
+
+func TestMsgIndicatingLeader(t *testing.T) {
+ tests := []struct {
+ msgt pb.MessageType
+ isMsgIndicatingLeader bool
+ }{
+ {pb.MsgHup, false},
+ {pb.MsgBeat, false},
+ {pb.MsgUnreachable, false},
+ {pb.MsgSnapStatus, false},
+ {pb.MsgCheckQuorum, false},
+ {pb.MsgTransferLeader, false},
+ {pb.MsgProp, false},
+ {pb.MsgApp, true},
+ {pb.MsgAppResp, false},
+ {pb.MsgVote, false},
+ {pb.MsgVoteResp, false},
+ {pb.MsgSnap, true},
+ {pb.MsgHeartbeat, true},
+ {pb.MsgHeartbeatResp, false},
+ {pb.MsgTimeoutNow, true},
+ {pb.MsgPreVote, false},
+ {pb.MsgPreVoteResp, false},
+ {pb.MsgStorageAppend, false},
+ {pb.MsgStorageAppendResp, false},
+ {pb.MsgStorageApply, false},
+ {pb.MsgStorageApplyResp, false},
+ {pb.MsgForgetLeader, false},
+ {pb.MsgFortifyLeader, true},
+ {pb.MsgFortifyLeaderResp, false},
+ {pb.MsgDeFortifyLeader, true},
+ }
+
+ for i, tt := range tests {
+ got := IsMsgIndicatingLeader(tt.msgt)
+ if got != tt.isMsgIndicatingLeader {
+ t.Errorf("#%d: got %v, want %v", i, got, tt.isMsgIndicatingLeader)
+ }
}
}
diff --git a/pkg/roachprod/config/config.go b/pkg/roachprod/config/config.go
index 1b09bfef19db..525e63371773 100644
--- a/pkg/roachprod/config/config.go
+++ b/pkg/roachprod/config/config.go
@@ -196,6 +196,7 @@ var DefaultPubKeyNames = []string{
"id_ed25519",
"id_ed25519_sk",
"id_dsa",
+ "google_compute_engine",
}
// SSHPublicKeyPath returns the path to the default public key expected by
diff --git a/pkg/roachprod/install/cockroach.go b/pkg/roachprod/install/cockroach.go
index 0cf849fa99db..04f7d2169581 100644
--- a/pkg/roachprod/install/cockroach.go
+++ b/pkg/roachprod/install/cockroach.go
@@ -125,13 +125,6 @@ type StartOpts struct {
SkipInit bool
StoreCount int
EncryptedStores bool
- // WALFailover, if non-empty, configures the value to supply to the
- // --wal-failover start flag.
- //
- // In a multi-store configuration, this may be set to "among-stores" to
- // enable WAL failover among stores. In a single-store configuration, this
- // should be set to `path=`.
- WALFailover string
// -- Options that apply only to the StartServiceForVirtualCluster target --
VirtualClusterName string
@@ -1128,9 +1121,6 @@ func (c *SyncedCluster) generateStartFlagsKV(node Node, startOpts StartOpts) []s
args = append(args, `--enterprise-encryption`, encryptArgs)
}
}
- if startOpts.WALFailover != "" {
- args = append(args, fmt.Sprintf("--wal-failover=%s", startOpts.WALFailover))
- }
args = append(args, fmt.Sprintf("--cache=%d%%", c.maybeScaleMem(25)))
diff --git a/pkg/roachprod/opentelemetry/cockroachdb_metrics.go b/pkg/roachprod/opentelemetry/cockroachdb_metrics.go
index 56b629d987a2..7b954b986492 100644
--- a/pkg/roachprod/opentelemetry/cockroachdb_metrics.go
+++ b/pkg/roachprod/opentelemetry/cockroachdb_metrics.go
@@ -1102,7 +1102,7 @@ var cockroachdbMetrics = map[string]string{
"physical_replication_commit_latency_bucket": "physical_replication.commit_latency.bucket",
"physical_replication_commit_latency_count": "physical_replication.commit_latency.count",
"physical_replication_commit_latency_sum": "physical_replication.commit_latency.sum",
- "physical_replication_cutover_progress": "physical_replication.cutover_progress",
+ "physical_replication_failover_progress": "physical_replication.failover_progress",
"physical_replication_distsql_replan_count": "physical_replication.distsql_replan_count",
"physical_replication_events_ingested": "physical_replication.events_ingested",
"physical_replication_flush_hist_nanos": "physical_replication.flush_hist_nanos",
diff --git a/pkg/roachprod/roachprod.go b/pkg/roachprod/roachprod.go
index db202bf364b6..0a73d8a0c904 100644
--- a/pkg/roachprod/roachprod.go
+++ b/pkg/roachprod/roachprod.go
@@ -740,15 +740,10 @@ const DefaultBackupSchedule = `RECURRING '*/15 * * * *' FULL BACKUP '@hourly' WI
// DefaultStartOpts returns a StartOpts populated with default values.
func DefaultStartOpts() install.StartOpts {
return install.StartOpts{
- EncryptedStores: false,
- NumFilesLimit: config.DefaultNumFilesLimit,
- SkipInit: false,
- StoreCount: 1,
- // When a node has 1 store, --wal-failover=among-stores has no effect
- // but is harmless. If a node has multiple stores, it'll allow failover
- // of WALs between stores. This allows us to exercise WAL failover and
- // helps insulate us from test failures from disk stalls in roachtests.
- WALFailover: "among-stores",
+ EncryptedStores: false,
+ NumFilesLimit: config.DefaultNumFilesLimit,
+ SkipInit: false,
+ StoreCount: 1,
VirtualClusterID: 2,
ScheduleBackups: false,
ScheduleBackupArgs: DefaultBackupSchedule,
@@ -1510,6 +1505,8 @@ func cleanupFailedCreate(l *logger.Logger, clusterName string) error {
return cloud.DestroyCluster(l, c)
}
+// AddLabels adds (or updates) the given labels to the VMs corresponding to the given cluster.
+// N.B. If a VM contains a label with the same key, its value will be updated.
func AddLabels(l *logger.Logger, clusterName string, labels map[string]string) error {
c, err := getClusterFromCache(l, clusterName)
if err != nil {
diff --git a/pkg/roachprod/vm/aws/aws.go b/pkg/roachprod/vm/aws/aws.go
index 4022e9dd28f4..c1d77002808d 100644
--- a/pkg/roachprod/vm/aws/aws.go
+++ b/pkg/roachprod/vm/aws/aws.go
@@ -576,7 +576,7 @@ func (p *Provider) editLabels(
if remove {
tagArgs = append(tagArgs, fmt.Sprintf("Key=%s", key))
} else {
- tagArgs = append(tagArgs, fmt.Sprintf("Key=%s,Value=%s", key, vm.SanitizeLabel(value)))
+ tagArgs = append(tagArgs, fmt.Sprintf("Key=%s,Value=%s", key, value))
}
}
args = append(args, tagArgs...)
@@ -602,7 +602,8 @@ func (p *Provider) editLabels(
return g.Wait()
}
-// AddLabels adds the given labels to the given VMs.
+// AddLabels adds (or updates) the given labels to the given VMs.
+// N.B. If a VM contains a label with the same key, its value will be updated.
func (p *Provider) AddLabels(l *logger.Logger, vms vm.List, labels map[string]string) error {
return p.editLabels(l, vms, labels, false)
}
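The updated doc comment pins down the add-or-update contract for labels: a key that already exists has its value overwritten rather than duplicated. A toy model of that contract, independent of any cloud provider:

```go
package main

import "fmt"

// applyLabels models the documented AddLabels contract: existing keys
// are updated in place, new keys are added, all other keys are untouched.
func applyLabels(current, updates map[string]string) map[string]string {
	merged := map[string]string{}
	for k, v := range current {
		merged[k] = v
	}
	for k, v := range updates {
		merged[k] = v // same key: the value is overwritten
	}
	return merged
}

func main() {
	vmLabels := map[string]string{"team": "storage", "env": "dev"}
	fmt.Println(applyLabels(vmLabels, map[string]string{"env": "prod", "owner": "ci"}))
	// map[env:prod owner:ci team:storage]
}
```

Tag stores with unique keys, as on the providers here, give this behavior essentially for free: re-tagging an existing key replaces its value.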
diff --git a/pkg/roachprod/vm/azure/azure.go b/pkg/roachprod/vm/azure/azure.go
index 0cc7cbee2f52..c4e28516cdb4 100644
--- a/pkg/roachprod/vm/azure/azure.go
+++ b/pkg/roachprod/vm/azure/azure.go
@@ -209,6 +209,8 @@ func getAzureDefaultLabelMap(opts vm.CreateOpts) map[string]string {
return m
}
+// AddLabels adds (or updates) the given labels to the given VMs.
+// N.B. If a VM contains a label with the same key, its value will be updated.
func (p *Provider) AddLabels(l *logger.Logger, vms vm.List, labels map[string]string) error {
return p.editLabels(l, vms, labels, false /*removeLabels*/)
}
diff --git a/pkg/roachprod/vm/gce/gcloud.go b/pkg/roachprod/vm/gce/gcloud.go
index 114eafbaca53..3790353b536b 100644
--- a/pkg/roachprod/vm/gce/gcloud.go
+++ b/pkg/roachprod/vm/gce/gcloud.go
@@ -312,7 +312,6 @@ func DefaultProviderOpts() *ProviderOpts {
SSDCount: 1,
PDVolumeType: "pd-ssd",
PDVolumeSize: 500,
- PDVolumeCount: 1,
TerminateOnMigration: false,
UseSpot: false,
preemptible: false,
@@ -336,7 +335,6 @@ type ProviderOpts struct {
SSDCount int
PDVolumeType string
PDVolumeSize int
- PDVolumeCount int
UseMultipleDisks bool
// use spot instances (i.e., latest version of preemptibles which can run > 24 hours)
UseSpot bool
@@ -1056,8 +1054,6 @@ func (o *ProviderOpts) ConfigureCreateFlags(flags *pflag.FlagSet) {
"Type of the persistent disk volume, only used if local-ssd=false")
flags.IntVar(&o.PDVolumeSize, ProviderName+"-pd-volume-size", 500,
"Size in GB of persistent disk volume, only used if local-ssd=false")
- flags.IntVar(&o.PDVolumeCount, ProviderName+"-pd-volume-count", 1,
- "Number of persistent disk volumes, only used if local-ssd=false")
flags.BoolVar(&o.UseMultipleDisks, ProviderName+"-enable-multiple-stores",
false, "Enable the use of multiple stores by creating one store directory per disk. "+
"Default is to raid0 stripe all disks.")
@@ -1190,7 +1186,7 @@ func (p *Provider) editLabels(
if remove {
tagArgs = append(tagArgs, key)
} else {
- tagArgs = append(tagArgs, fmt.Sprintf("%s=%s", key, vm.SanitizeLabel(value)))
+ tagArgs = append(tagArgs, fmt.Sprintf("%s=%s", key, value))
}
}
tagArgsString := strings.Join(tagArgs, ",")
@@ -1216,7 +1212,8 @@ func (p *Provider) editLabels(
return g.Wait()
}
-// AddLabels adds the given labels to the given VMs.
+// AddLabels adds (or updates) the given labels to the given VMs.
+// N.B. If a VM contains a label with the same key, its value will be updated.
func (p *Provider) AddLabels(l *logger.Logger, vms vm.List, labels map[string]string) error {
return p.editLabels(l, vms, labels, false /* remove */)
}
@@ -1392,18 +1389,15 @@ func (p *Provider) computeInstanceArgs(
extraMountOpts = fmt.Sprintf("%s,nobarrier", extraMountOpts)
}
} else {
- // create the "PDVolumeCount" number of persistent disks with the same configuration
- for i := 0; i < providerOpts.PDVolumeCount; i++ {
- pdProps := []string{
- fmt.Sprintf("type=%s", providerOpts.PDVolumeType),
- fmt.Sprintf("size=%dGB", providerOpts.PDVolumeSize),
- "auto-delete=yes",
- }
- // TODO(pavelkalinnikov): support disk types with "provisioned-throughput"
- // option, such as Hyperdisk Throughput:
- // https://cloud.google.com/compute/docs/disks/add-hyperdisk#hyperdisk-throughput.
- args = append(args, "--create-disk", strings.Join(pdProps, ","))
- }
+ pdProps := []string{
+ fmt.Sprintf("type=%s", providerOpts.PDVolumeType),
+ fmt.Sprintf("size=%dGB", providerOpts.PDVolumeSize),
+ "auto-delete=yes",
+ }
+ // TODO(pavelkalinnikov): support disk types with "provisioned-throughput"
+ // option, such as Hyperdisk Throughput:
+ // https://cloud.google.com/compute/docs/disks/add-hyperdisk#hyperdisk-throughput.
+ args = append(args, "--create-disk", strings.Join(pdProps, ","))
// Enable DISCARD commands for persistent disks, as is advised in:
// https://cloud.google.com/compute/docs/disks/optimizing-pd-performance#formatting_parameters.
extraMountOpts = "discard"
@@ -1685,7 +1679,7 @@ func (p *Provider) Create(
return err
}
}
- return propagateDiskLabels(l, project, labels, zoneToHostNames, opts.SSDOpts.UseLocalSSD, providerOpts.PDVolumeCount)
+ return propagateDiskLabels(l, project, labels, zoneToHostNames, opts.SSDOpts.UseLocalSSD)
}
// computeGrowDistribution computes the distribution of new nodes across the
@@ -1807,8 +1801,7 @@ func (p *Provider) Grow(l *logger.Logger, vms vm.List, clusterName string, names
}
labelsJoined += fmt.Sprintf("%s=%s", key, value)
}
- return propagateDiskLabels(l, project, labelsJoined, zoneToHostNames, len(vms[0].LocalDisks) != 0,
- len(vms[0].NonBootAttachedVolumes))
+ return propagateDiskLabels(l, project, labelsJoined, zoneToHostNames, len(vms[0].LocalDisks) != 0)
}
type jsonBackendService struct {
@@ -2291,7 +2284,6 @@ func propagateDiskLabels(
labels string,
zoneToHostNames map[string][]string,
useLocalSSD bool,
- pdVolumeCount int,
) error {
var g errgroup.Group
@@ -2321,23 +2313,16 @@ func propagateDiskLabels(
})
if !useLocalSSD {
- // The persistent disks are already created. The disks are suffixed with an offset
- // which starts from 1. A total of "pdVolumeCount" disks are created.
g.Go(func() error {
- // the loop is run inside the go-routine to ensure that we do not run all the gcloud commands.
- // For a 150 node with 4 disks, we have seen that the gcloud command cannot handle so many concurrent
- // commands.
- for offset := 1; offset <= pdVolumeCount; offset++ {
- persistentDiskArgs := append([]string(nil), argsPrefix...)
- persistentDiskArgs = append(persistentDiskArgs, zoneArg...)
- // N.B. additional persistent disks are suffixed with the offset, starting at 1.
- persistentDiskArgs = append(persistentDiskArgs, fmt.Sprintf("%s-%d", hostName, offset))
- cmd := exec.Command("gcloud", persistentDiskArgs...)
-
- output, err := cmd.CombinedOutput()
- if err != nil {
- return errors.Wrapf(err, "Command: gcloud %s\nOutput: %s", persistentDiskArgs, output)
- }
+ persistentDiskArgs := append([]string(nil), argsPrefix...)
+ persistentDiskArgs = append(persistentDiskArgs, zoneArg...)
+ // N.B. the single additional persistent disk is suffixed with -1.
+ persistentDiskArgs = append(persistentDiskArgs, fmt.Sprintf("%s-1", hostName))
+ cmd := exec.Command("gcloud", persistentDiskArgs...)
+
+ output, err := cmd.CombinedOutput()
+ if err != nil {
+ return errors.Wrapf(err, "Command: gcloud %s\nOutput: %s", persistentDiskArgs, output)
}
return nil
})
diff --git a/pkg/roachprod/vm/vm.go b/pkg/roachprod/vm/vm.go
index 04b2a5ff6d33..0e8185ec6f9e 100644
--- a/pkg/roachprod/vm/vm.go
+++ b/pkg/roachprod/vm/vm.go
@@ -483,10 +483,11 @@ type Provider interface {
// Return the account name associated with the provider
FindActiveAccount(l *logger.Logger) (string, error)
List(l *logger.Logger, opts ListOptions) (List, error)
- // The name of the Provider, which will also surface in the top-level Providers map.
-
+ // AddLabels adds (or updates) the given labels to the given VMs.
+ // N.B. If a VM contains a label with the same key, its value will be updated.
AddLabels(l *logger.Logger, vms List, labels map[string]string) error
RemoveLabels(l *logger.Logger, vms List, labels []string) error
+ // The name of the Provider, which will also surface in the top-level Providers map.
Name() string
// Active returns true if the provider is properly installed and capable of
@@ -753,7 +754,7 @@ func DNSSafeName(name string) string {
return regexp.MustCompile(`-+`).ReplaceAllString(name, "-")
}
-// SanitizeLabel returns a version of the string that can be used as a label.
+// SanitizeLabel returns a version of the string that can be used as a (resource) label.
// This takes the lowest common denominator of the label requirements;
// GCE: "The value can only contain lowercase letters, numeric characters, underscores and dashes.
// The value can be at most 63 characters long"
@@ -771,3 +772,12 @@ func SanitizeLabel(label string) string {
label = strings.Trim(label, "-")
return label
}
+
+// SanitizeLabelValues returns the same set of keys with sanitized values.
+func SanitizeLabelValues(labels map[string]string) map[string]string {
+ sanitized := map[string]string{}
+ for k, v := range labels {
+ sanitized[k] = SanitizeLabel(v)
+ }
+ return sanitized
+}
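Note the division of labor this creates: `editLabels` on AWS and GCE no longer sanitizes values inline, so callers that need provider-safe values are expected to run them through `SanitizeLabelValues` first (keys pass through unchanged). A standalone sketch of the helper's effect, using a simplified stand-in for `SanitizeLabel`:

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// sanitizeLabel is a simplified stand-in for vm.SanitizeLabel:
// lowercase, replace disallowed characters with dashes, cap at 63
// characters, and trim leading/trailing dashes.
func sanitizeLabel(label string) string {
	label = strings.ToLower(label)
	label = regexp.MustCompile(`[^a-z0-9_-]`).ReplaceAllString(label, "-")
	if len(label) > 63 {
		label = label[:63]
	}
	return strings.Trim(label, "-")
}

// sanitizeLabelValues mirrors the new vm.SanitizeLabelValues helper:
// keys pass through unchanged, values are sanitized.
func sanitizeLabelValues(labels map[string]string) map[string]string {
	sanitized := map[string]string{}
	for k, v := range labels {
		sanitized[k] = sanitizeLabel(v)
	}
	return sanitized
}

func main() {
	fmt.Println(sanitizeLabelValues(map[string]string{
		"cluster": "Roachprod Test!",
	})) // map[cluster:roachprod-test]
}
```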
diff --git a/pkg/rpc/context_test.go b/pkg/rpc/context_test.go
index 0eda6956f1cc..266b70093328 100644
--- a/pkg/rpc/context_test.go
+++ b/pkg/rpc/context_test.go
@@ -283,6 +283,10 @@ type rangefeedEventSink struct {
var _ kvpb.RangeFeedEventSink = (*rangefeedEventSink)(nil)
+func (s *rangefeedEventSink) Context() context.Context {
+ return s.ctx
+}
+
// Note that SendUnbuffered itself is not thread-safe (grpc stream is not
// thread-safe), but tests were written in a way that sends sequentially,
// ensuring thread-safety for SendUnbuffered.
diff --git a/pkg/server/api_v2.go b/pkg/server/api_v2.go
index 1a611cb2713e..4ced81df4e89 100644
--- a/pkg/server/api_v2.go
+++ b/pkg/server/api_v2.go
@@ -191,7 +191,7 @@ func registerRoutes(
{"sql/", a.execSQL, true, authserver.RegularRole, true},
{"database_metadata/", a.GetDbMetadata, true, authserver.RegularRole, true},
- {"database_metadata/{database_id:[0-9]+}/", a.GetDbMetadataForId, true, authserver.RegularRole, true},
+ {"database_metadata/{database_id:[0-9]+}/", a.GetDbMetadataWithDetails, true, authserver.RegularRole, true},
{"table_metadata/", a.GetTableMetadata, true, authserver.RegularRole, true},
{"table_metadata/{table_id:[0-9]+}/", a.GetTableMetadataWithDetails, true, authserver.RegularRole, true},
{"table_metadata/updatejob/", a.TableMetadataJob, true, authserver.RegularRole, true},
diff --git a/pkg/server/api_v2_databases_metadata.go b/pkg/server/api_v2_databases_metadata.go
index d111384d0263..70a9b5d8455d 100644
--- a/pkg/server/api_v2_databases_metadata.go
+++ b/pkg/server/api_v2_databases_metadata.go
@@ -7,18 +7,21 @@ package server
import (
"context"
+ "encoding/json"
"fmt"
"net/http"
"strconv"
"strings"
"time"
+ "github.com/cockroachdb/cockroach/pkg/clusterversion"
"github.com/cockroachdb/cockroach/pkg/jobs"
"github.com/cockroachdb/cockroach/pkg/security/username"
"github.com/cockroachdb/cockroach/pkg/server/apiutil"
"github.com/cockroachdb/cockroach/pkg/server/authserver"
"github.com/cockroachdb/cockroach/pkg/server/serverpb"
"github.com/cockroachdb/cockroach/pkg/server/srverrors"
+ "github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/sql/isql"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/sessiondata"
@@ -136,6 +139,10 @@ func (a *apiV2Server) GetTableMetadata(w http.ResponseWriter, r *http.Request) {
return
}
+ if !checkVersion(ctx, w, a.sqlServer.execCfg.Settings) {
+ return
+ }
+
queryValues := r.URL.Query()
dbId, err := apiutil.GetIntQueryStringVal(queryValues, dbIdKey)
if err != nil {
@@ -261,6 +268,9 @@ func (a *apiV2Server) GetTableMetadataWithDetails(w http.ResponseWriter, r *http
}
ctx := a.sqlServer.AnnotateCtx(r.Context())
+ if !checkVersion(ctx, w, a.sqlServer.execCfg.Settings) {
+ return
+ }
sqlUser := authserver.UserFromHTTPAuthInfoContext(ctx)
pathVars := mux.Vars(r)
tableId, err := strconv.Atoi(pathVars["table_id"])
@@ -601,7 +611,9 @@ func (a *apiV2Server) GetDbMetadata(w http.ResponseWriter, r *http.Request) {
http.Error(w, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed)
return
}
-
+ if !checkVersion(ctx, w, a.sqlServer.execCfg.Settings) {
+ return
+ }
queryValues := r.URL.Query()
dbName := queryValues.Get(nameKey)
@@ -681,7 +693,7 @@ func (a *apiV2Server) GetDbMetadata(w http.ResponseWriter, r *http.Request) {
apiutil.WriteJSONResponse(ctx, w, 200, resp)
}
-// GetDbMetadataForId fetches database metadata for a specific database id.
+// GetDbMetadataWithDetails fetches database metadata for a specific database id.
//
// The user making the request must have the CONNECT database grant for the database, or the admin privilege.
//
@@ -704,13 +716,16 @@ func (a *apiV2Server) GetDbMetadata(w http.ResponseWriter, r *http.Request) {
// "404":
// description: If the database for the provided id doesn't exist or the user doesn't have necessary permissions
// to access the database
-func (a *apiV2Server) GetDbMetadataForId(w http.ResponseWriter, r *http.Request) {
+func (a *apiV2Server) GetDbMetadataWithDetails(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodGet {
http.Error(w, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed)
return
}
ctx := a.sqlServer.AnnotateCtx(r.Context())
+ if !checkVersion(ctx, w, a.sqlServer.execCfg.Settings) {
+ return
+ }
sqlUser := authserver.UserFromHTTPAuthInfoContext(ctx)
pathVars := mux.Vars(r)
databaseId, err := strconv.Atoi(pathVars["database_id"])
@@ -917,6 +932,9 @@ func rowToDatabaseMetadata(scanner resultScanner, row tree.Datums) (dbm dbMetada
func (a *apiV2Server) TableMetadataJob(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
ctx = a.sqlServer.AnnotateCtx(ctx)
+ if !checkVersion(ctx, w, a.sqlServer.execCfg.Settings) {
+ return
+ }
sqlUser := authserver.UserFromHTTPAuthInfoContext(ctx)
authorized, err := a.updateTableMetadataJobAuthorized(ctx, sqlUser)
if err != nil {
@@ -1068,6 +1086,30 @@ func (a *apiV2Server) updateTableMetadataJobAuthorized(
return count > 0, nil
}
+func checkVersion(ctx context.Context, w http.ResponseWriter, settings *cluster.Settings) bool {
+ if !settings.Version.IsActive(ctx, clusterversion.V24_3_AddTableMetadataCols) {
+ message := "This API is not accessible on this version of CockroachDB."
+ resp := versionConflictResponse{
+ Version: settings.Version.ActiveVersion(ctx).Version.String(),
+ Message: message,
+ }
+ b, err := json.Marshal(&resp)
+ if err != nil {
+ srverrors.APIV2InternalError(ctx, err, w)
+ return false
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ w.WriteHeader(http.StatusConflict)
+ _, err = w.Write(b)
+ if err != nil {
+ log.Warningf(ctx, "HTTP short write: %v", err)
+ }
+ return false
+ }
+
+ return true
+}
+
type PaginatedResponse[T any] struct {
Results T `json:"results"`
PaginationInfo paginationInfo `json:"pagination_info"`
@@ -1134,3 +1176,8 @@ type tableMetadataWithDetailsResponse struct {
type dbMetadataWithDetailsResponse struct {
Metadata dbMetadata `json:"metadata"`
}
+
+type versionConflictResponse struct {
+ Version string
+ Message string
+}
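
Clients of the gated endpoints now have to treat HTTP 409 as a first-class outcome. A hedged client-side sketch; the endpoint path and response fields mirror the diff, while the base URL and error handling are placeholders:

```go
package metadataclient

import (
	"encoding/json"
	"fmt"
	"net/http"
)

type versionConflictResponse struct {
	Version string
	Message string
}

// fetchTableMetadata demonstrates handling the new version-conflict status.
func fetchTableMetadata(client *http.Client, baseURL string) error {
	resp, err := client.Get(baseURL + "/api/v2/table_metadata/?dbId=1")
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	if resp.StatusCode == http.StatusConflict {
		// The cluster has not yet finalized V24_3_AddTableMetadataCols.
		var vc versionConflictResponse
		if err := json.NewDecoder(resp.Body).Decode(&vc); err != nil {
			return err
		}
		return fmt.Errorf("unsupported cluster version %s: %s", vc.Version, vc.Message)
	}
	// On 200, decode the paginated metadata payload as before.
	return nil
}
```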
diff --git a/pkg/server/api_v2_databases_metadata_test.go b/pkg/server/api_v2_databases_metadata_test.go
index 339a7490a9d5..20aed6b9fd17 100644
--- a/pkg/server/api_v2_databases_metadata_test.go
+++ b/pkg/server/api_v2_databases_metadata_test.go
@@ -19,6 +19,7 @@ import (
"time"
"github.com/cockroachdb/cockroach/pkg/base"
+ "github.com/cockroachdb/cockroach/pkg/clusterversion"
"github.com/cockroachdb/cockroach/pkg/security/username"
"github.com/cockroachdb/cockroach/pkg/sql/tablemetadatacache"
tablemetadatacache_util "github.com/cockroachdb/cockroach/pkg/sql/tablemetadatacache/util"
@@ -320,7 +321,7 @@ func TestGetTableMetadata(t *testing.T) {
})
}
-func TestGetTableMetadataForId(t *testing.T) {
+func TestGetTableMetadataWithDetails(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
testCluster := serverutils.StartCluster(t, 1, base.TestClusterArgs{})
@@ -621,7 +622,7 @@ func TestGetDbMetadata(t *testing.T) {
})
}
-func TestGetDbMetadataForId(t *testing.T) {
+func TestGetDbMetadataWithDetails(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
testCluster := serverutils.StartCluster(t, 1, base.TestClusterArgs{})
@@ -837,6 +838,42 @@ func TestTriggerMetadataUpdateJob(t *testing.T) {
})
}
+func TestVersionGating(t *testing.T) {
+ defer leaktest.AfterTest(t)()
+ defer log.Scope(t).Close(t)
+ ts := serverutils.StartServerOnly(t, base.TestServerArgs{
+ Knobs: base.TestingKnobs{
+ Server: &TestingKnobs{
+ DisableAutomaticVersionUpgrade: make(chan struct{}),
+ ClusterVersionOverride: clusterversion.MinSupported.Version(),
+ },
+ },
+ })
+ defer ts.Stopper().Stop(context.Background())
+ client, err := ts.GetAdminHTTPClient()
+ require.NoError(t, err)
+ var testCases = []struct {
+ name string
+ url string
+ method string
+ }{
+ {"table metadata", "/api/v2/table_metadata/?dbId=1", http.MethodGet},
+ {"table metadata details", "/api/v2/table_metadata/1/", http.MethodGet},
+ {"database metadata", "/api/v2/database_metadata/", http.MethodGet},
+ {"database metadata details", "/api/v2/database_metadata/1/", http.MethodGet},
+ {"table metadata update job info", "/api/v2/table_metadata/updatejob/", http.MethodGet},
+ {"table metadata update job trigger", "/api/v2/table_metadata/updatejob/", http.MethodPost},
+ }
+
+ for _, testCase := range testCases {
+ t.Run(testCase.name, func(t *testing.T) {
+ resp := makeApiRequest[versionConflictResponse](t, client, ts.AdminURL().WithPath(testCase.url).String(), testCase.method)
+ require.Equal(t, clusterversion.MinSupported.Version().String(), resp.Version)
+ })
+
+ }
+}
+
func makeApiRequest[T any](
t *testing.T, client http.Client, uri string, httpMethod string,
) (mdResp T) {
diff --git a/pkg/server/license/enforcer.go b/pkg/server/license/enforcer.go
index fb187a6d9400..33ee54693a41 100644
--- a/pkg/server/license/enforcer.go
+++ b/pkg/server/license/enforcer.go
@@ -352,6 +352,11 @@ func (e *Enforcer) GetTelemetryDeadline() (deadline, lastPing time.Time, ok bool
}
lastTelemetryDataReceived := e.telemetryStatusReporter.GetLastSuccessfulTelemetryPing()
+ pingOverrideForTesting := envutil.EnvOrDefaultInt64("COCKROACH_LAST_SUCCESSFUL_TELEMETRY_PING", lastTelemetryDataReceived.Unix())
+ if pingOverrideForTesting < lastTelemetryDataReceived.Unix() {
+ lastTelemetryDataReceived = timeutil.Unix(pingOverrideForTesting, 0)
+ }
+
throttleTS := lastTelemetryDataReceived.Add(e.getMaxTelemetryInterval())
return throttleTS, lastTelemetryDataReceived, true
}
@@ -614,10 +619,12 @@ func (e *Enforcer) getGracePeriodDuration(defaultAndMaxLength time.Duration) tim
return newLength
}
+var maxOpenTxnsDuringThrottle = envutil.EnvOrDefaultInt64("COCKROACH_MAX_OPEN_TXNS_DURING_THROTTLE", defaultMaxOpenTransactions)
+
// getMaxOpenTransactions returns the number of open transactions allowed before
// throttling takes effect.
func (e *Enforcer) getMaxOpenTransactions() int64 {
- newLimit := envutil.EnvOrDefaultInt64("COCKROACH_MAX_OPEN_TXNS_DURING_THROTTLE", defaultMaxOpenTransactions)
+ newLimit := maxOpenTxnsDuringThrottle
if tk := e.GetTestingKnobs(); tk != nil && tk.OverrideMaxOpenTransactions != nil {
newLimit = *tk.OverrideMaxOpenTransactions
}
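
Hoisting the envutil lookup into a package-level var reads the environment once at process start rather than on every throttle check; the testing knob still takes precedence. A standalone sketch of the same pattern using only the standard library; names and the default value are illustrative:

```go
package throttle

import (
	"os"
	"strconv"
)

const defaultMaxOpenTransactions = 100 // illustrative default

// Read once at package init: repeated lookups on a hot path are wasted work.
var maxOpenTxnsDuringThrottle = envOrDefaultInt64(
	"COCKROACH_MAX_OPEN_TXNS_DURING_THROTTLE", defaultMaxOpenTransactions)

func envOrDefaultInt64(name string, def int64) int64 {
	if s, ok := os.LookupEnv(name); ok {
		if v, err := strconv.ParseInt(s, 10, 64); err == nil {
			return v
		}
	}
	return def
}

// maxOpenTransactions returns the cached limit unless a testing override
// is supplied, mirroring the knob check in the diff.
func maxOpenTransactions(override *int64) int64 {
	limit := maxOpenTxnsDuringThrottle
	if override != nil {
		limit = *override
	}
	return limit
}
```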
diff --git a/pkg/server/node.go b/pkg/server/node.go
index 972aee249f4d..e3d396907fc8 100644
--- a/pkg/server/node.go
+++ b/pkg/server/node.go
@@ -2131,9 +2131,9 @@ func (n *Node) MuxRangeFeed(muxStream kvpb.Internal_MuxRangeFeedServer) error {
var streamSink rangefeed.Stream
if ubs, ok := sm.(*rangefeed.UnbufferedSender); ok {
- streamSink = rangefeed.NewPerRangeEventSink(req.RangeID, req.StreamID, ubs)
+ streamSink = rangefeed.NewPerRangeEventSink(streamCtx, req.RangeID, req.StreamID, ubs)
} else if bs, ok := sm.(*rangefeed.BufferedSender); ok {
- streamSink = rangefeed.NewBufferedPerRangeEventSink(req.RangeID, req.StreamID, bs)
+ streamSink = rangefeed.NewBufferedPerRangeEventSink(streamCtx, req.RangeID, req.StreamID, bs)
} else {
log.Fatalf(streamCtx, "unknown sender type %T", sm)
}
@@ -2144,7 +2144,7 @@ func (n *Node) MuxRangeFeed(muxStream kvpb.Internal_MuxRangeFeedServer) error {
// nil without blocking on rangefeed completion. Events are then sent to
// the provided streamSink. If the rangefeed disconnects after being
// successfully registered, it calls streamSink.Disconnect with the error.
- if err := n.stores.RangeFeed(streamCtx, req, streamSink); err != nil {
+ if err := n.stores.RangeFeed(req, streamSink); err != nil {
sm.SendBufferedError(
makeMuxRangefeedErrorEvent(req.StreamID, req.RangeID, kvpb.NewError(err)))
}
diff --git a/pkg/server/server.go b/pkg/server/server.go
index 69599be4ac03..ac0c1af1eba4 100644
--- a/pkg/server/server.go
+++ b/pkg/server/server.go
@@ -179,11 +179,10 @@ type topLevelServer struct {
// keyVisualizerServer implements `keyvispb.KeyVisualizerServer`
keyVisualizerServer *KeyVisualizerServer
- recoveryServer *loqrecovery.Server
- raftTransport *kvserver.RaftTransport
- storelivenessTransport *storeliveness.Transport
- stopper *stop.Stopper
- stopTrigger *stopTrigger
+ recoveryServer *loqrecovery.Server
+ raftTransport *kvserver.RaftTransport
+ stopper *stop.Stopper
+ stopTrigger *stopTrigger
debug *debug.Server
kvProber *kvprober.Prober
@@ -1283,7 +1282,6 @@ func NewServer(cfg Config, stopper *stop.Stopper) (serverctl.ServerStartupInterf
tsServer: &sTS,
recoveryServer: recoveryServer,
raftTransport: raftTransport,
- storelivenessTransport: storeLivenessTransport,
stopper: stopper,
stopTrigger: stopTrigger,
debug: debugServer,
diff --git a/pkg/server/testserver.go b/pkg/server/testserver.go
index 8f62e1e3f763..627100833e63 100644
--- a/pkg/server/testserver.go
+++ b/pkg/server/testserver.go
@@ -542,14 +542,6 @@ func (ts *testServer) RaftTransport() interface{} {
return nil
}
-// StoreLivenessTransport is part of the serverutils.StorageLayerInterface.
-func (ts *testServer) StoreLivenessTransport() interface{} {
- if ts != nil {
- return ts.storelivenessTransport
- }
- return nil
-}
-
// AmbientCtx implements serverutils.ApplicationLayerInterface. This
// retrieves the ambient context for this server. This is intended for
// exclusive use by test code.
diff --git a/pkg/settings/registry.go b/pkg/settings/registry.go
index 95e00bb737fc..d8df12c845f9 100644
--- a/pkg/settings/registry.go
+++ b/pkg/settings/registry.go
@@ -517,11 +517,15 @@ var ReadableTypes = map[string]string{
}
// RedactedValue returns:
-// - a string representation of the value, if the setting is reportable (or it
-// is a string setting with an empty value);
-// - "" if the setting is not reportable;
+// - a string representation of the value, if the setting is reportable;
+// - "" if the setting is not reportable, sensitive, or a string;
// - "" if there is no setting with this name.
func RedactedValue(key InternalKey, values *Values, forSystemTenant bool) string {
+ if k, ok := registry[key]; ok {
+ if k.Typ() == "s" || k.isSensitive() || !k.isReportable() {
+ return ""
+ }
+ }
if setting, ok := LookupForReportingByKey(key, forSystemTenant); ok {
return setting.String(values)
}
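
The early registry check makes redaction the first decision: string-typed, sensitive, or non-reportable settings short-circuit before the reporting lookup ever runs. A condensed sketch of that precedence, with the interfaces simplified and the reporting lookup stubbed out:

```go
package settings

type setting interface {
	Typ() string // "s" denotes a string setting
	isSensitive() bool
	isReportable() bool
	String() string
}

// redactedValue mirrors the decision order in the diff: redaction checks
// first, then the value itself, then the unknown-setting fallback.
func redactedValue(registry map[string]setting, key string) string {
	if k, ok := registry[key]; ok {
		if k.Typ() == "s" || k.isSensitive() || !k.isReportable() {
			return "<redacted>"
		}
		return k.String()
	}
	return "<unknown>"
}
```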
diff --git a/pkg/sql/alter_column_type.go b/pkg/sql/alter_column_type.go
index 17b942777af0..44982b6cf3e9 100644
--- a/pkg/sql/alter_column_type.go
+++ b/pkg/sql/alter_column_type.go
@@ -32,6 +32,10 @@ import (
var AlterColTypeInTxnNotSupportedErr = unimplemented.NewWithIssuef(
49351, "ALTER COLUMN TYPE is not supported inside a transaction")
+var alterColTypeInCombinationNotSupportedErr = unimplemented.NewWithIssuef(
+ 49351, "ALTER COLUMN TYPE cannot be used in combination "+
+ "with other ALTER TABLE commands")
+
// AlterColumnType takes an AlterTableAlterColumnType, determines
// which conversion to use and applies the type conversion.
func AlterColumnType(
@@ -212,7 +216,7 @@ func alterColumnTypeGeneral(
}
if len(cmds) > 1 {
- return sqlerrors.NewAlterColTypeInCombinationNotSupportedError()
+ return alterColTypeInCombinationNotSupportedErr
}
// Disallow ALTER COLUMN TYPE general if the table is already undergoing
diff --git a/pkg/sql/alter_column_type_test.go b/pkg/sql/alter_column_type_test.go
index 10a80ace4568..8e32623c79f0 100644
--- a/pkg/sql/alter_column_type_test.go
+++ b/pkg/sql/alter_column_type_test.go
@@ -75,9 +75,6 @@ INSERT INTO test2 VALUES ('hello');`)
go func() {
sqlDB.ExecMultiple(t,
`SET enable_experimental_alter_column_type_general = true;`,
- // TODO(spilchen): This test is designed for the legacy schema changer.
- // Update it for the declarative schema changer (DSC).
- `SET use_declarative_schema_changer = 'off';`,
`ALTER TABLE test ALTER COLUMN x TYPE STRING;`)
wg.Done()
}()
@@ -154,9 +151,6 @@ INSERT INTO test2 VALUES (true);
go func() {
sqlDB.ExecMultiple(t,
`SET enable_experimental_alter_column_type_general = true;`,
- // TODO(spilchen): This test is designed for the legacy schema changer.
- // Update it for the declarative schema changer (DSC).
- `SET use_declarative_schema_changer = 'off';`,
`ALTER TABLE test ALTER COLUMN x TYPE BOOL USING (x > 0);`)
wg.Done()
}()
@@ -209,10 +203,7 @@ func TestVisibilityDuringAlterColumnType(t *testing.T) {
sqlDB := sqlutils.MakeSQLRunner(db)
defer s.Stopper().Stop(ctx)
- sqlDB.ExecMultiple(t, `SET enable_experimental_alter_column_type_general = true;`,
- // TODO(spilchen): This test is designed for the legacy schema changer.
- // Update it for the declarative schema changer (DSC).
- `SET use_declarative_schema_changer = 'off'`)
+ sqlDB.Exec(t, `SET enable_experimental_alter_column_type_general = true;`)
sqlDB.Exec(t, `
CREATE DATABASE t;
@@ -268,7 +259,7 @@ func TestAlterColumnTypeFailureRollback(t *testing.T) {
sqlDB.Exec(t, `CREATE TABLE t.test (x STRING);`)
sqlDB.Exec(t, `INSERT INTO t.test VALUES ('1'), ('2'), ('HELLO');`)
- expected := "pq: failed to construct index entries during backfill: could not parse \"HELLO\" as type int: strconv.ParseInt: parsing \"HELLO\": invalid syntax"
+ expected := "pq: could not parse \"HELLO\" as type int: strconv.ParseInt: parsing \"HELLO\": invalid syntax"
sqlDB.ExpectErr(t, expected, `ALTER TABLE t.test ALTER COLUMN x TYPE INT USING x::INT8;`)
// Ensure that the add column and column swap mutations are cleaned up.
diff --git a/pkg/sql/catalog/bootstrap/testdata/testdata b/pkg/sql/catalog/bootstrap/testdata/testdata
index 5cffbf9621e9..c720aa8c9f7e 100644
--- a/pkg/sql/catalog/bootstrap/testdata/testdata
+++ b/pkg/sql/catalog/bootstrap/testdata/testdata
@@ -1,7 +1,7 @@
-system hash=f93eb889512719710d1c75bff8a77ce6ad6c4e837319053f6f149ca13749d710
+system hash=f02637ca2ab3fa50efc1a4884f2406b8a0aad72a3f6249c5461e6e922a2e2491
----
[{"key":"8b"}
-,{"key":"8b89898a89","value":"0312470a0673797374656d10011a250a0d0a0561646d696e1080101880100a0c0a04726f6f7410801018801012046e6f646518032200280140004a006a0a08d8843d1002180020167000"}
+,{"key":"8b89898a89","value":"0312450a0673797374656d10011a250a0d0a0561646d696e1080101880100a0c0a04726f6f7410801018801012046e6f646518032200280140004a006a0808181002180020167000"}
,{"key":"8b898b8a89","value":"030a94030a0a64657363726970746f721803200128013a0042270a02696410011a0c08011040180030005014600020003000680070007800800100880100980100422f0a0a64657363726970746f7210021a0c08081000180030005011600020013000680070007800800100880100980100480352710a077072696d61727910011801220269642a0a64657363726970746f72300140004a10080010001a00200028003000380040005a0070027a0408002000800100880100900104980101a20106080012001800a80100b20100ba0100c00100c80100d00101e00100e901000000000000000060026a210a0b0a0561646d696e102018200a0a0a04726f6f741020182012046e6f64651803800101880103980100b201130a077072696d61727910001a02696420012800b201240a1066616d5f325f64657363726970746f7210021a0a64657363726970746f7220022802b80103c20100e80100f2010408001200f801008002009202009a0200b20200b80200c0021dc80200e00200800300880302a80300b00300d00300d80300e00300f80300880400"}
,{"key":"8b898c8a89","value":"030acd050a0575736572731804200128013a00422d0a08757365726e616d6510011a0c0807100018003000501960002000300068007000780080010088010098010042330a0e68617368656450617373776f726410021a0c0808100018003000501160002001300068007000780080010088010098010042320a066973526f6c6510031a0c08001000180030005010600020002a0566616c73653000680070007800800100880100980100422c0a07757365725f696410041a0c080c100018003000501a60002000300068007000780080010088010098010048055290010a077072696d617279100118012208757365726e616d652a0e68617368656450617373776f72642a066973526f6c652a07757365725f6964300140004a10080010001a00200028003000380040005a007002700370047a0408002000800100880100900104980101a20106080012001800a80100b20100ba0100c00100c80100d00102e00100e90100000000000000005a740a1175736572735f757365725f69645f696478100218012207757365725f69643004380140004a10080010001a00200028003000380040005a007a0408002000800100880100900103980100a20106080012001800a80100b20100ba0100c00100c80100d00101e00100e901000000000000000060036a250a0d0a0561646d696e10e00318e0030a0c0a04726f6f7410e00318e00312046e6f64651803800101880103980100b201240a077072696d61727910001a08757365726e616d651a07757365725f6964200120042804b2012c0a1466616d5f325f68617368656450617373776f726410021a0e68617368656450617373776f726420022802b2011c0a0c66616d5f335f6973526f6c6510031a066973526f6c6520032803b80104c20100e80100f2010408001200f801008002009202009a0200b20200b80200c0021dc80200e00200800300880303a80300b00300d00300d80300e00300f80300880400"}
,{"key":"8b898d8a89","value":"030a83030a057a6f6e65731805200128013a0042270a02696410011a0c08011040180030005014600020003000680070007800800100880100980100422b0a06636f6e66696710021a0c080810001800300050116000200130006800700078008001008801009801004803526d0a077072696d61727910011801220269642a06636f6e666967300140004a10080010001a00200028003000380040005a0070027a0408002000800100880100900104980101a20106080012001800a80100b20100ba0100c00100c80100d00101e00100e901000000000000000060026a250a0d0a0561646d696e10e00318e0030a0c0a04726f6f7410e00318e00312046e6f64651803800101880103980100b201130a077072696d61727910001a02696420012800b2011c0a0c66616d5f325f636f6e66696710021a06636f6e66696720022802b80103c20100e80100f2010408001200f801008002009202009a0200b20200b80200c0021dc80200e00200800300880302a80300b00300d00300d80300e00300f80300880400"}
@@ -198,10 +198,10 @@ system hash=f93eb889512719710d1c75bff8a77ce6ad6c4e837319053f6f149ca13749d710
,{"key":"cb"}
]
-tenant hash=ec31fb2e5b85fbb8da0beded6f174ff0a8196088aebe8bf5cdeacb07689b6d6a
+tenant hash=e025f38b283dfb401584c95355095420047d10496ec2e9bf009b4a7d8fd09b5c
----
[{"key":""}
-,{"key":"8b89898a89","value":"0312470a0673797374656d10011a250a0d0a0561646d696e1080101880100a0c0a04726f6f7410801018801012046e6f646518032200280140004a006a0a08d8843d1002180020167000"}
+,{"key":"8b89898a89","value":"0312450a0673797374656d10011a250a0d0a0561646d696e1080101880100a0c0a04726f6f7410801018801012046e6f646518032200280140004a006a0808181002180020167000"}
,{"key":"8b898b8a89","value":"030a94030a0a64657363726970746f721803200128013a0042270a02696410011a0c08011040180030005014600020003000680070007800800100880100980100422f0a0a64657363726970746f7210021a0c08081000180030005011600020013000680070007800800100880100980100480352710a077072696d61727910011801220269642a0a64657363726970746f72300140004a10080010001a00200028003000380040005a0070027a0408002000800100880100900104980101a20106080012001800a80100b20100ba0100c00100c80100d00101e00100e901000000000000000060026a210a0b0a0561646d696e102018200a0a0a04726f6f741020182012046e6f64651803800101880103980100b201130a077072696d61727910001a02696420012800b201240a1066616d5f325f64657363726970746f7210021a0a64657363726970746f7220022802b80103c20100e80100f2010408001200f801008002009202009a0200b20200b80200c0021dc80200e00200800300880302a80300b00300d00300d80300e00300f80300880400"}
,{"key":"8b898c8a89","value":"030acd050a0575736572731804200128013a00422d0a08757365726e616d6510011a0c0807100018003000501960002000300068007000780080010088010098010042330a0e68617368656450617373776f726410021a0c0808100018003000501160002001300068007000780080010088010098010042320a066973526f6c6510031a0c08001000180030005010600020002a0566616c73653000680070007800800100880100980100422c0a07757365725f696410041a0c080c100018003000501a60002000300068007000780080010088010098010048055290010a077072696d617279100118012208757365726e616d652a0e68617368656450617373776f72642a066973526f6c652a07757365725f6964300140004a10080010001a00200028003000380040005a007002700370047a0408002000800100880100900104980101a20106080012001800a80100b20100ba0100c00100c80100d00102e00100e90100000000000000005a740a1175736572735f757365725f69645f696478100218012207757365725f69643004380140004a10080010001a00200028003000380040005a007a0408002000800100880100900103980100a20106080012001800a80100b20100ba0100c00100c80100d00101e00100e901000000000000000060036a250a0d0a0561646d696e10e00318e0030a0c0a04726f6f7410e00318e00312046e6f64651803800101880103980100b201240a077072696d61727910001a08757365726e616d651a07757365725f6964200120042804b2012c0a1466616d5f325f68617368656450617373776f726410021a0e68617368656450617373776f726420022802b2011c0a0c66616d5f335f6973526f6c6510031a066973526f6c6520032803b80104c20100e80100f2010408001200f801008002009202009a0200b20200b80200c0021dc80200e00200800300880303a80300b00300d00300d80300e00300f80300880400"}
,{"key":"8b898d8a89","value":"030a83030a057a6f6e65731805200128013a0042270a02696410011a0c08011040180030005014600020003000680070007800800100880100980100422b0a06636f6e66696710021a0c080810001800300050116000200130006800700078008001008801009801004803526d0a077072696d61727910011801220269642a06636f6e666967300140004a10080010001a00200028003000380040005a0070027a0408002000800100880100900104980101a20106080012001800a80100b20100ba0100c00100c80100d00101e00100e901000000000000000060026a250a0d0a0561646d696e10e00318e0030a0c0a04726f6f7410e00318e00312046e6f64651803800101880103980100b201130a077072696d61727910001a02696420012800b2011c0a0c66616d5f325f636f6e66696710021a06636f6e66696720022802b80103c20100e80100f2010408001200f801008002009202009a0200b20200b80200c0021dc80200e00200800300880302a80300b00300d00300d80300e00300f80300880400"}
diff --git a/pkg/sql/catalog/colinfo/result_columns.go b/pkg/sql/catalog/colinfo/result_columns.go
index 6c911d0ba7bd..8081c74fe610 100644
--- a/pkg/sql/catalog/colinfo/result_columns.go
+++ b/pkg/sql/catalog/colinfo/result_columns.go
@@ -310,7 +310,7 @@ var TenantColumnsWithReplication = ResultColumns{
// The latest fully replicated time.
{Name: "replicated_time", Typ: types.TimestampTZ},
{Name: "replication_lag", Typ: types.Interval},
- {Name: "cutover_time", Typ: types.Decimal},
+ {Name: "failover_time", Typ: types.Decimal},
{Name: "status", Typ: types.String},
}
@@ -351,18 +351,16 @@ var Ranges = append(
// The following columns are computed by RangesExtraRenders below.
ResultColumn{Name: "lease_holder", Typ: types.Int},
ResultColumn{Name: "range_size", Typ: types.Int},
- ResultColumn{Name: "errors", Typ: types.String},
)
// RangesExtraRenders describes the extra projections in
// crdb_internal.ranges not included in crdb_internal.ranges_no_leases.
const RangesExtraRenders = `
- (crdb_internal.lease_holder_with_errors(start_key)->>'Leaseholder')::INT AS lease_holder,
- (crdb_internal.range_stats_with_errors(start_key)->'RangeStats'->>'key_bytes')::INT +
- (crdb_internal.range_stats_with_errors(start_key)->'RangeStats'->>'val_bytes')::INT +
- coalesce((crdb_internal.range_stats_with_errors(start_key)->'RangeStats'->>'range_key_bytes')::INT, 0) +
- coalesce((crdb_internal.range_stats_with_errors(start_key)->'RangeStats'->>'range_val_bytes')::INT, 0) AS range_size,
- concat(crdb_internal.lease_holder_with_errors(start_key)->>'Error', ' ', crdb_internal.range_stats_with_errors(start_key)->>'Error') AS errors
+ crdb_internal.lease_holder(start_key) AS lease_holder,
+ (crdb_internal.range_stats(start_key)->>'key_bytes')::INT +
+ (crdb_internal.range_stats(start_key)->>'val_bytes')::INT +
+ coalesce((crdb_internal.range_stats(start_key)->>'range_key_bytes')::INT, 0) +
+ coalesce((crdb_internal.range_stats(start_key)->>'range_val_bytes')::INT, 0) AS range_size
`
// IdentifySystemColumns is the schema for IDENTIFY_SYSTEM.
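
The simplified projection calls crdb_internal.lease_holder and crdb_internal.range_stats directly and sums the byte counters out of the returned JSON. A hedged sketch of issuing an equivalent query from Go; the DSN and driver choice are assumptions, and only two of the four byte counters are summed for brevity:

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/lib/pq" // assumed Postgres-wire driver
)

func main() {
	db, err := sql.Open("postgres",
		"postgresql://root@localhost:26257/defaultdb?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	const q = `
SELECT crdb_internal.lease_holder(start_key) AS lease_holder,
       (crdb_internal.range_stats(start_key)->>'key_bytes')::INT +
       (crdb_internal.range_stats(start_key)->>'val_bytes')::INT AS range_size
  FROM crdb_internal.ranges_no_leases
 LIMIT 1`
	var leaseHolder, rangeSize int64
	if err := db.QueryRow(q).Scan(&leaseHolder, &rangeSize); err != nil {
		log.Fatal(err)
	}
	fmt.Println(leaseHolder, rangeSize)
}
```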
diff --git a/pkg/sql/catalog/replication/BUILD.bazel b/pkg/sql/catalog/replication/BUILD.bazel
index eba109a6d8f3..35812f6d1103 100644
--- a/pkg/sql/catalog/replication/BUILD.bazel
+++ b/pkg/sql/catalog/replication/BUILD.bazel
@@ -35,6 +35,7 @@ go_test(
"//pkg/security/securitytest",
"//pkg/server",
"//pkg/sql",
+ "//pkg/sql/catalog/descpb",
"//pkg/sql/catalog/lease",
"//pkg/testutils",
"//pkg/testutils/serverutils",
diff --git a/pkg/sql/catalog/replication/reader_catalog_test.go b/pkg/sql/catalog/replication/reader_catalog_test.go
index d173d1491ed0..07afc1d60778 100644
--- a/pkg/sql/catalog/replication/reader_catalog_test.go
+++ b/pkg/sql/catalog/replication/reader_catalog_test.go
@@ -9,6 +9,7 @@ import (
"context"
"fmt"
"os"
+ "strings"
"sync/atomic"
"testing"
@@ -17,6 +18,7 @@ import (
"github.com/cockroachdb/cockroach/pkg/security/securitytest"
"github.com/cockroachdb/cockroach/pkg/server"
"github.com/cockroachdb/cockroach/pkg/sql"
+ "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/lease"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/replication"
"github.com/cockroachdb/cockroach/pkg/testutils"
@@ -235,9 +237,30 @@ func TestReaderCatalogTSAdvance(t *testing.T) {
TenantName: "src",
})
require.NoError(t, err)
+
+ waitForRefresh := make(chan struct{})
+ descriptorRefreshHookEnabled := atomic.Bool{}
+ closeWaitForRefresh := func() {
+ if descriptorRefreshHookEnabled.Load() {
+ descriptorRefreshHookEnabled.Store(false)
+ close(waitForRefresh)
+ }
+ }
+ defer closeWaitForRefresh()
+ destTestingKnobs := base.TestingKnobs{
+ SQLLeaseManager: &lease.ManagerTestingKnobs{
+ TestingDescriptorRefreshedEvent: func(descriptor *descpb.Descriptor) {
+ if !descriptorRefreshHookEnabled.Load() {
+ return
+ }
+ <-waitForRefresh
+ },
+ },
+ }
destTenant, _, err := ts.StartSharedProcessTenant(ctx, base.TestSharedProcessTenantArgs{
TenantID: serverutils.TestTenantID2(),
TenantName: "dest",
+ Knobs: destTestingKnobs,
})
require.NoError(t, err)
srcConn := srcTenant.SQLConn(t)
@@ -304,52 +327,97 @@ func TestReaderCatalogTSAdvance(t *testing.T) {
require.NoError(t, advanceTS(now))
compareEqual("SELECT * FROM t1")
- // Validate multiple advances of the timestamp work concurrently with queries.
- // The tight loop below should relatively easily hit errors if all the timestamps
- // are not line up on the reader catalog.
- grp := ctxgroup.WithContext(ctx)
- require.NoError(t, err)
- var iterationsDone atomic.Bool
+
var newTS hlc.Timestamp
- grp.GoCtx(func(ctx context.Context) error {
- defer func() {
- iterationsDone.Swap(true)
- }()
- const NumIterations = 16
- for iter := 0; iter < NumIterations; iter++ {
- if _, err := srcRunner.DB.ExecContext(ctx,
- "INSERT INTO t1(val, j) VALUES('open', $1);",
- iter); err != nil {
- return err
- }
- // Signal the next timestamp value.
- newTS = ts.Clock().Now()
- // Advanced the timestamp next.
- if err := advanceTS(newTS); err != nil {
- return err
+ descriptorRefreshHookEnabled.Store(true)
+ for _, useAOST := range []bool{false, true} {
+ if useAOST {
+ closeWaitForRefresh()
+ }
+ // When AOST is enabled, a fixed number of iterations is sufficient. When
+ // AOST is disabled, we iterate until a reader timestamp error is
+ // generated.
+ errorDetected := false
+ checkAOSTError := func(err error) {
+ if useAOST || err == nil {
+ require.NoError(t, err)
+ return
}
+ errorDetected = errorDetected ||
+ strings.Contains(err.Error(), "PCR reader timestamp has moved forward, existing descriptor")
}
- return nil
- })
- // Validates that the implicit txn and explicit txn's
- // can safely use fixed timestamps.
- for !iterationsDone.Load() {
- tx := destRunner.Begin(t)
- _, err := tx.Exec("SELECT * FROM t1")
- require.NoError(t, err)
- _, err = tx.Exec("SELECT * FROM v1")
+ // Validate multiple advances of the timestamp work concurrently with queries.
+ // The tight loop below should relatively easily hit errors if the timestamps
+ // do not line up on the reader catalog.
+ grp := ctxgroup.WithContext(ctx)
require.NoError(t, err)
- _, err = tx.Exec("SELECT * FROM t2")
- require.NoError(t, err)
- require.NoError(t, tx.Commit())
+ iterationsDoneCh := make(chan struct{})
+ grp.GoCtx(func(ctx context.Context) error {
+ defer func() {
+ close(iterationsDoneCh)
+ }()
+ const NumIterations = 16
+ // Ensure the minimum iterations are met, and any expected errors
+ // are observed before stopping TS advances.
+ for iter := 0; iter < NumIterations; iter++ {
+ if _, err := srcRunner.DB.ExecContext(ctx,
+ "INSERT INTO t1(val, j) VALUES('open', $1);",
+ iter); err != nil {
+ return err
+ }
+ // Signal the next timestamp value.
+ newTS = ts.Clock().Now()
+ // Advance the timestamp next.
+ if err := advanceTS(newTS); err != nil {
+ return err
+ }
+ }
+ return nil
+ })
+ // Validate that implicit and explicit txns
+ // can safely use fixed timestamps.
+ if useAOST {
+ destRunner.Exec(t, "SET bypass_pcr_reader_catalog_aost='off'")
+ } else {
+ destRunner.Exec(t, "SET bypass_pcr_reader_catalog_aost='on'")
+ }
+ iterationsDone := false
+ for !iterationsDone {
+ if !useAOST {
+ select {
+ case waitForRefresh <- struct{}{}:
+ case <-iterationsDoneCh:
+ iterationsDone = true
+ }
+ }
+ select {
+ case <-iterationsDoneCh:
+ iterationsDone = true
+ default:
+ tx := destRunner.Begin(t)
+ _, err := tx.Exec("SELECT * FROM t1")
+ checkAOSTError(err)
+ _, err = tx.Exec("SELECT * FROM v1")
+ checkAOSTError(err)
+ _, err = tx.Exec("SELECT * FROM t2")
+ checkAOSTError(err)
+ checkAOSTError(tx.Commit())
+
+ _, err = destRunner.DB.ExecContext(ctx, "SELECT * FROM t1,v1, t2")
+ checkAOSTError(err)
+ _, err = destRunner.DB.ExecContext(ctx, "SELECT * FROM v1 ORDER BY 1")
+ checkAOSTError(err)
+ _, err = destRunner.DB.ExecContext(ctx, "SELECT * FROM t2 ORDER BY 1")
+ checkAOSTError(err)
+ }
+ }
- destRunner.Exec(t, "SELECT * FROM t1,v1, t2")
- destRunner.Exec(t, "SELECT * FROM v1 ORDER BY 1")
- destRunner.Exec(t, "SELECT * FROM t2 ORDER BY 1")
+ // Finally ensure the queries actually match.
+ require.NoError(t, grp.Wait())
+ // Check if the error was detected.
+ require.Equalf(t, !useAOST, errorDetected,
+ "error was detected unexpectedly (AOST = %t on connection)", useAOST)
}
-
- // Finally ensure the queries actually match.
- require.NoError(t, grp.Wait())
now = newTS
compareEqual("SELECT * FROM t1 ORDER BY j")
compareEqual("SELECT * FROM v1 ORDER BY 1")
diff --git a/pkg/sql/catalog/seqexpr/sequence.go b/pkg/sql/catalog/seqexpr/sequence.go
index f9937ef8f187..bf3a5e60e5c5 100644
--- a/pkg/sql/catalog/seqexpr/sequence.go
+++ b/pkg/sql/catalog/seqexpr/sequence.go
@@ -80,10 +80,10 @@ func GetSequenceFromFunc(funcExpr *tree.FuncExpr) (*SeqIdentifier, error) {
if len(funcExpr.Exprs) == overload.Types.Length() {
paramTypes, ok := overload.Types.(tree.ParamTypes)
if !ok {
- panic(pgerror.Newf(
+ return nil, pgerror.Newf(
pgcode.InvalidFunctionDefinition,
"%s has invalid argument types", funcExpr.Func.String(),
- ))
+ )
}
found = true
for i := 0; i < len(paramTypes); i++ {
@@ -98,10 +98,10 @@ func GetSequenceFromFunc(funcExpr *tree.FuncExpr) (*SeqIdentifier, error) {
}
}
if !found {
- panic(pgerror.New(
+ return nil, pgerror.New(
pgcode.DatatypeMismatch,
"could not find matching function overload for given arguments",
- ))
+ )
}
}
return nil, nil
diff --git a/pkg/sql/catalog/systemschema_test/testdata/bootstrap_system b/pkg/sql/catalog/systemschema_test/testdata/bootstrap_system
index c4269bef4d09..5f2f1074e4c2 100644
--- a/pkg/sql/catalog/systemschema_test/testdata/bootstrap_system
+++ b/pkg/sql/catalog/systemschema_test/testdata/bootstrap_system
@@ -674,7 +674,7 @@ schema_telemetry
----
{"database":{"name":"defaultdb","id":100,"modificationTime":{"wallTime":"0"},"version":"1","privileges":{"users":[{"userProto":"admin","privileges":"2","withGrantOption":"2"},{"userProto":"public","privileges":"2048"},{"userProto":"root","privileges":"2","withGrantOption":"2"}],"ownerProto":"root","version":3},"schemas":{"public":{"id":101}},"defaultPrivileges":{}}}
{"database":{"name":"postgres","id":102,"modificationTime":{"wallTime":"0"},"version":"1","privileges":{"users":[{"userProto":"admin","privileges":"2","withGrantOption":"2"},{"userProto":"public","privileges":"2048"},{"userProto":"root","privileges":"2","withGrantOption":"2"}],"ownerProto":"root","version":3},"schemas":{"public":{"id":103}},"defaultPrivileges":{}}}
-{"database":{"name":"system","id":1,"modificationTime":{"wallTime":"0"},"version":"1","privileges":{"users":[{"userProto":"admin","privileges":"2048","withGrantOption":"2048"},{"userProto":"root","privileges":"2048","withGrantOption":"2048"}],"ownerProto":"node","version":3},"systemDatabaseSchemaVersion":{"majorVal":1000024,"minorVal":2,"internal":22}}}
+{"database":{"name":"system","id":1,"modificationTime":{"wallTime":"0"},"version":"1","privileges":{"users":[{"userProto":"admin","privileges":"2048","withGrantOption":"2048"},{"userProto":"root","privileges":"2048","withGrantOption":"2048"}],"ownerProto":"node","version":3},"systemDatabaseSchemaVersion":{"majorVal":24,"minorVal":2,"internal":22}}}
{"table":{"name":"comments","id":24,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"type","id":1,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"object_id","id":2,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"sub_id","id":3,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"comment","id":4,"type":{"family":"StringFamily","oid":25}}],"nextColumnId":5,"families":[{"name":"primary","columnNames":["type","object_id","sub_id"],"columnIds":[1,2,3]},{"name":"fam_4_comment","id":4,"columnNames":["comment"],"columnIds":[4],"defaultColumnId":4}],"nextFamilyId":5,"primaryIndex":{"name":"primary","id":1,"unique":true,"version":4,"keyColumnNames":["type","object_id","sub_id"],"keyColumnDirections":["ASC","ASC","ASC"],"storeColumnNames":["comment"],"keyColumnIds":[1,2,3],"storeColumnIds":[4],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":1},"nextIndexId":2,"privileges":{"users":[{"userProto":"admin","privileges":"480","withGrantOption":"480"},{"userProto":"public","privileges":"32"},{"userProto":"root","privileges":"480","withGrantOption":"480"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":2}}
{"table":{"name":"database_role_settings","id":44,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"database_id","id":1,"type":{"family":"OidFamily","oid":26}},{"name":"role_name","id":2,"type":{"family":"StringFamily","oid":25}},{"name":"settings","id":3,"type":{"family":"ArrayFamily","arrayElemType":"StringFamily","oid":1009,"arrayContents":{"family":"StringFamily","oid":25}}},{"name":"role_id","id":4,"type":{"family":"OidFamily","oid":26}}],"nextColumnId":5,"families":[{"name":"primary","columnNames":["database_id","role_name","settings","role_id"],"columnIds":[1,2,3,4]}],"nextFamilyId":1,"primaryIndex":{"name":"primary","id":1,"unique":true,"version":4,"keyColumnNames":["database_id","role_name"],"keyColumnDirections":["ASC","ASC"],"storeColumnNames":["settings","role_id"],"keyColumnIds":[1,2],"storeColumnIds":[3,4],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":2},"indexes":[{"name":"database_role_settings_database_id_role_id_key","id":2,"unique":true,"version":3,"keyColumnNames":["database_id","role_id"],"keyColumnDirections":["ASC","ASC"],"storeColumnNames":["settings"],"keyColumnIds":[1,4],"keySuffixColumnIds":[2],"storeColumnIds":[3],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{},"constraintId":1}],"nextIndexId":3,"privileges":{"users":[{"userProto":"admin","privileges":"480","withGrantOption":"480"},{"userProto":"root","privileges":"480","withGrantOption":"480"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":3}}
{"table":{"name":"descriptor","id":3,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"id","id":1,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"descriptor","id":2,"type":{"family":"BytesFamily","oid":17},"nullable":true}],"nextColumnId":3,"families":[{"name":"primary","columnNames":["id"],"columnIds":[1]},{"name":"fam_2_descriptor","id":2,"columnNames":["descriptor"],"columnIds":[2],"defaultColumnId":2}],"nextFamilyId":3,"primaryIndex":{"name":"primary","id":1,"unique":true,"version":4,"keyColumnNames":["id"],"keyColumnDirections":["ASC"],"storeColumnNames":["descriptor"],"keyColumnIds":[1],"storeColumnIds":[2],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":1},"nextIndexId":2,"privileges":{"users":[{"userProto":"admin","privileges":"32","withGrantOption":"32"},{"userProto":"root","privileges":"32","withGrantOption":"32"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":2}}
diff --git a/pkg/sql/catalog/systemschema_test/testdata/bootstrap_tenant b/pkg/sql/catalog/systemschema_test/testdata/bootstrap_tenant
index c4269bef4d09..5f2f1074e4c2 100644
--- a/pkg/sql/catalog/systemschema_test/testdata/bootstrap_tenant
+++ b/pkg/sql/catalog/systemschema_test/testdata/bootstrap_tenant
@@ -674,7 +674,7 @@ schema_telemetry
----
{"database":{"name":"defaultdb","id":100,"modificationTime":{"wallTime":"0"},"version":"1","privileges":{"users":[{"userProto":"admin","privileges":"2","withGrantOption":"2"},{"userProto":"public","privileges":"2048"},{"userProto":"root","privileges":"2","withGrantOption":"2"}],"ownerProto":"root","version":3},"schemas":{"public":{"id":101}},"defaultPrivileges":{}}}
{"database":{"name":"postgres","id":102,"modificationTime":{"wallTime":"0"},"version":"1","privileges":{"users":[{"userProto":"admin","privileges":"2","withGrantOption":"2"},{"userProto":"public","privileges":"2048"},{"userProto":"root","privileges":"2","withGrantOption":"2"}],"ownerProto":"root","version":3},"schemas":{"public":{"id":103}},"defaultPrivileges":{}}}
-{"database":{"name":"system","id":1,"modificationTime":{"wallTime":"0"},"version":"1","privileges":{"users":[{"userProto":"admin","privileges":"2048","withGrantOption":"2048"},{"userProto":"root","privileges":"2048","withGrantOption":"2048"}],"ownerProto":"node","version":3},"systemDatabaseSchemaVersion":{"majorVal":1000024,"minorVal":2,"internal":22}}}
+{"database":{"name":"system","id":1,"modificationTime":{"wallTime":"0"},"version":"1","privileges":{"users":[{"userProto":"admin","privileges":"2048","withGrantOption":"2048"},{"userProto":"root","privileges":"2048","withGrantOption":"2048"}],"ownerProto":"node","version":3},"systemDatabaseSchemaVersion":{"majorVal":24,"minorVal":2,"internal":22}}}
{"table":{"name":"comments","id":24,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"type","id":1,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"object_id","id":2,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"sub_id","id":3,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"comment","id":4,"type":{"family":"StringFamily","oid":25}}],"nextColumnId":5,"families":[{"name":"primary","columnNames":["type","object_id","sub_id"],"columnIds":[1,2,3]},{"name":"fam_4_comment","id":4,"columnNames":["comment"],"columnIds":[4],"defaultColumnId":4}],"nextFamilyId":5,"primaryIndex":{"name":"primary","id":1,"unique":true,"version":4,"keyColumnNames":["type","object_id","sub_id"],"keyColumnDirections":["ASC","ASC","ASC"],"storeColumnNames":["comment"],"keyColumnIds":[1,2,3],"storeColumnIds":[4],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":1},"nextIndexId":2,"privileges":{"users":[{"userProto":"admin","privileges":"480","withGrantOption":"480"},{"userProto":"public","privileges":"32"},{"userProto":"root","privileges":"480","withGrantOption":"480"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":2}}
{"table":{"name":"database_role_settings","id":44,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"database_id","id":1,"type":{"family":"OidFamily","oid":26}},{"name":"role_name","id":2,"type":{"family":"StringFamily","oid":25}},{"name":"settings","id":3,"type":{"family":"ArrayFamily","arrayElemType":"StringFamily","oid":1009,"arrayContents":{"family":"StringFamily","oid":25}}},{"name":"role_id","id":4,"type":{"family":"OidFamily","oid":26}}],"nextColumnId":5,"families":[{"name":"primary","columnNames":["database_id","role_name","settings","role_id"],"columnIds":[1,2,3,4]}],"nextFamilyId":1,"primaryIndex":{"name":"primary","id":1,"unique":true,"version":4,"keyColumnNames":["database_id","role_name"],"keyColumnDirections":["ASC","ASC"],"storeColumnNames":["settings","role_id"],"keyColumnIds":[1,2],"storeColumnIds":[3,4],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":2},"indexes":[{"name":"database_role_settings_database_id_role_id_key","id":2,"unique":true,"version":3,"keyColumnNames":["database_id","role_id"],"keyColumnDirections":["ASC","ASC"],"storeColumnNames":["settings"],"keyColumnIds":[1,4],"keySuffixColumnIds":[2],"storeColumnIds":[3],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{},"constraintId":1}],"nextIndexId":3,"privileges":{"users":[{"userProto":"admin","privileges":"480","withGrantOption":"480"},{"userProto":"root","privileges":"480","withGrantOption":"480"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":3}}
{"table":{"name":"descriptor","id":3,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"id","id":1,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"descriptor","id":2,"type":{"family":"BytesFamily","oid":17},"nullable":true}],"nextColumnId":3,"families":[{"name":"primary","columnNames":["id"],"columnIds":[1]},{"name":"fam_2_descriptor","id":2,"columnNames":["descriptor"],"columnIds":[2],"defaultColumnId":2}],"nextFamilyId":3,"primaryIndex":{"name":"primary","id":1,"unique":true,"version":4,"keyColumnNames":["id"],"keyColumnDirections":["ASC"],"storeColumnNames":["descriptor"],"keyColumnIds":[1],"storeColumnIds":[2],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":1},"nextIndexId":2,"privileges":{"users":[{"userProto":"admin","privileges":"32","withGrantOption":"32"},{"userProto":"root","privileges":"32","withGrantOption":"32"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":2}}
diff --git a/pkg/sql/colexec/BUILD.bazel b/pkg/sql/colexec/BUILD.bazel
index 7d8decab98de..53a1f439c068 100644
--- a/pkg/sql/colexec/BUILD.bazel
+++ b/pkg/sql/colexec/BUILD.bazel
@@ -75,7 +75,6 @@ go_library(
"//pkg/sql/sem/tree",
"//pkg/sql/sqltelemetry", # keep
"//pkg/sql/types",
- "//pkg/storage/enginepb",
"//pkg/util/buildutil",
"//pkg/util/duration", # keep
"//pkg/util/encoding", # keep
diff --git a/pkg/sql/colexec/builtin_funcs.go b/pkg/sql/colexec/builtin_funcs.go
index 4631125af047..ca3e2f597963 100644
--- a/pkg/sql/colexec/builtin_funcs.go
+++ b/pkg/sql/colexec/builtin_funcs.go
@@ -133,17 +133,7 @@ func NewBuiltinFunctionOperator(
)
}
return newRangeStatsOperator(
- evalCtx.RangeStatsFetcher, allocator, argumentCols[0], outputIdx, input, false, /* withErrors */
- )
- case tree.CrdbInternalRangeStatsWithErrors:
- if len(argumentCols) != 1 {
- return nil, errors.AssertionFailedf(
- "expected 1 input column to crdb_internal.range_stats, got %d",
- len(argumentCols),
- )
- }
- return newRangeStatsOperator(
- evalCtx.RangeStatsFetcher, allocator, argumentCols[0], outputIdx, input, true, /* withErrors */
+ evalCtx.RangeStatsFetcher, allocator, argumentCols[0], outputIdx, input,
)
default:
return &defaultBuiltinFuncOperator{
diff --git a/pkg/sql/colexec/colbuilder/execplan.go b/pkg/sql/colexec/colbuilder/execplan.go
index 34dc32a3c921..4869c12cfc47 100644
--- a/pkg/sql/colexec/colbuilder/execplan.go
+++ b/pkg/sql/colexec/colbuilder/execplan.go
@@ -727,6 +727,25 @@ func NewColOperator(
result := opResult{NewColOperatorResult: colexecargs.GetNewColOperatorResult()}
r := result.NewColOperatorResult
spec := args.Spec
+ // Throughout this method we often use the type slice from the input spec to
+ // create the type schema of an operator. However, it is possible that the
+ // same type slice is shared by multiple stages of processors. If it just so
+ // happens that there is free capacity in the slice, and we append to it
+ // when planning operators for both stages, we might corrupt the type schema
+ // captured by the operators for the earlier stage. In order to prevent such
+ // type schema corruption we cap the slice to force creation of a fresh copy
+ // on the first append.
+ if flowCtx.Gateway {
+ // Sharing of the same type slice is only possible on the gateway node
+ // because we don't serialize the specs created during the physical
+ // planning. On the remote nodes each stage of processors gets its own
+ // allocation, so there is no aliasing that can lead to type schema
+ // corruption.
+ for i := range spec.Input {
+ inputSpec := &spec.Input[i]
+ inputSpec.ColumnTypes = inputSpec.ColumnTypes[:len(inputSpec.ColumnTypes):len(inputSpec.ColumnTypes)]
+ }
+ }
inputs := args.Inputs
if args.Factory == nil {
// This code path is only used in tests.
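
The capping trick relies on Go's full slice expression: s[:len(s):len(s)] sets the capacity equal to the length, so the next append must allocate a fresh backing array instead of writing into spare capacity that another operator may alias. A self-contained illustration of both the failure mode and the fix:

```go
package main

import "fmt"

func main() {
	shared := make([]int, 2, 4) // len 2, cap 4: the spare capacity is the hazard
	shared[0], shared[1] = 1, 2

	stage1 := shared
	stage2 := shared
	stage1 = append(stage1, 99) // writes into shared's backing array
	stage2 = append(stage2, 42) // overwrites the same slot
	fmt.Println(stage1[2])      // 42, not 99: stage1's schema was corrupted

	// Capping forces copy-on-append, so the stages stop aliasing.
	capped1 := shared[:len(shared):len(shared)]
	capped2 := shared[:len(shared):len(shared)]
	capped1 = append(capped1, 99)
	capped2 = append(capped2, 42)
	fmt.Println(capped1[2], capped2[2]) // 99 42
}
```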
diff --git a/pkg/sql/colexec/range_stats.go b/pkg/sql/colexec/range_stats.go
index 4d2638232ef0..345afe3d9844 100644
--- a/pkg/sql/colexec/range_stats.go
+++ b/pkg/sql/colexec/range_stats.go
@@ -14,7 +14,6 @@ import (
"github.com/cockroachdb/cockroach/pkg/sql/colexecop"
"github.com/cockroachdb/cockroach/pkg/sql/colmem"
"github.com/cockroachdb/cockroach/pkg/sql/sem/eval"
- "github.com/cockroachdb/cockroach/pkg/storage/enginepb"
"github.com/cockroachdb/cockroach/pkg/util/json"
"github.com/cockroachdb/errors"
)
@@ -25,9 +24,6 @@ type rangeStatsOperator struct {
allocator *colmem.Allocator
argumentCol int
outputIdx int
- // withErrors defines if the operator includes any encountered errors in the
- // returned JSON struct. If true, these errors will not fail the query.
- withErrors bool
}
var _ colexecop.Operator = (*rangeStatsOperator)(nil)
@@ -42,7 +38,6 @@ func newRangeStatsOperator(
argumentCol int,
outputIdx int,
input colexecop.Operator,
- withErrors bool,
) (colexecop.Operator, error) {
return &rangeStatsOperator{
OneInputHelper: colexecop.MakeOneInputHelper(input),
@@ -50,7 +45,6 @@ func newRangeStatsOperator(
argumentCol: argumentCol,
outputIdx: outputIdx,
fetcher: fetcher,
- withErrors: withErrors,
}, nil
}
@@ -130,36 +124,17 @@ func (r *rangeStatsOperator) Next() coldata.Batch {
// keys plus some constant multiple.
// TODO(yuzefovich): add unit tests that use the RunTests test
// harness.
- res, rangeStatsErr := r.fetcher.RangeStats(r.Ctx, keys...)
- if rangeStatsErr != nil && !r.withErrors {
- colexecerror.ExpectedError(rangeStatsErr)
+ res, err := r.fetcher.RangeStats(r.Ctx, keys...)
+ if err != nil {
+ colexecerror.ExpectedError(err)
}
- if len(res) != len(keys) && !r.withErrors {
- colexecerror.InternalError(
- errors.AssertionFailedf(
- "unexpected number of RangeStats responses %d: %d expected", len(res), len(keys),
- ),
- )
+ if len(res) != len(keys) {
+ colexecerror.InternalError(errors.AssertionFailedf(
+ "unexpected number of RangeStats responses %d: %d expected", len(res), len(keys),
+ ))
}
for i, outputIdx := range keysOutputIdx {
- rswe := &rangeStatsWithErrors{}
- if rangeStatsErr != nil {
- rswe.Error = rangeStatsErr.Error()
- }
- // Not all keys from the keysOutputIdx are guaranteed to be
- // present in res (e.g. some may be missing if there were errors
- // in fetcher.RangeStats and r.withErrors = true).
- if i < len(res) {
- rswe.RangeStats = &res[i].MVCCStats
- }
- var jsonStr []byte
- var err error
- if r.withErrors {
- jsonStr, err = gojson.Marshal(rswe)
- } else {
- jsonStr, err = gojson.Marshal(&res[i].MVCCStats)
- }
-
+ jsonStr, err := gojson.Marshal(&res[i].MVCCStats)
if err != nil {
colexecerror.ExpectedError(err)
}
@@ -169,12 +144,6 @@ func (r *rangeStatsOperator) Next() coldata.Batch {
}
jsonOutput.Set(outputIdx, jsonDatum)
}
- },
- )
+ })
return batch
}
-
-type rangeStatsWithErrors struct {
- RangeStats *enginepb.MVCCStats
- Error string
-}
diff --git a/pkg/sql/colexecerror/error.go b/pkg/sql/colexecerror/error.go
index d573f6f34ee0..8c7f310729a4 100644
--- a/pkg/sql/colexecerror/error.go
+++ b/pkg/sql/colexecerror/error.go
@@ -17,11 +17,7 @@ import (
"github.com/gogo/protobuf/proto"
)
-const (
- panicLineSubstring = "runtime/panic.go"
- runtimePanicFileSubstring = "runtime"
- runtimePanicFunctionSubstring = "runtime."
-)
+const panicLineSubstring = "runtime/panic.go"
var testingKnobShouldCatchPanic bool
@@ -99,13 +95,9 @@ func CatchVectorizedRuntimeError(operation func()) (retErr error) {
// panic frame.
var panicLineFound bool
var panicEmittedFrom string
- // Usually, we'll find the offending frame within 3 program counters,
- // starting with the caller of this deferred function (2 above the
- // runtime.Callers frame). However, we also want to catch panics
- // originating in the Go runtime with the runtime frames being returned
- // by CallersFrames, so we allow for 5 more program counters for that
- // case (e.g. invalid interface conversions use 2 counters).
- pc := make([]uintptr, 8)
+ // We should be able to find it within 3 program counters, starting with the
+ // caller of this deferred function (2 above the runtime.Callers frame).
+ pc := make([]uintptr, 3)
n := runtime.Callers(2, pc)
if n >= 1 {
frames := runtime.CallersFrames(pc[:n])
@@ -115,13 +107,6 @@ func CatchVectorizedRuntimeError(operation func()) (retErr error) {
if strings.Contains(frame.File, panicLineSubstring) {
panicLineFound = true
} else if panicLineFound {
- if strings.HasPrefix(frame.Function, runtimePanicFunctionSubstring) &&
- strings.Contains(frame.File, runtimePanicFileSubstring) {
- // This frame is from the Go runtime, so we simply
- // ignore it to get to the offending one within the
- // CRDB.
- continue
- }
panicEmittedFrom = frame.Function
break
}
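
The revert shrinks the program-counter buffer back to 3 entries and drops the runtime-frame skipping, but the walk itself is unchanged: find the runtime/panic.go frame, then report the first frame after it as the panic origin. A standalone sketch of that walk; only the substring constant is taken from the diff:

```go
package main

import (
	"fmt"
	"runtime"
	"strings"
)

const panicLineSubstring = "runtime/panic.go"

// panicOrigin is meant to run inside a deferred recover: it returns the
// function of the first frame that follows the runtime/panic.go frame.
func panicOrigin() string {
	pc := make([]uintptr, 3)
	n := runtime.Callers(2, pc) // skip runtime.Callers and panicOrigin itself
	frames := runtime.CallersFrames(pc[:n])
	panicLineFound := false
	for {
		frame, more := frames.Next()
		if strings.Contains(frame.File, panicLineSubstring) {
			panicLineFound = true
		} else if panicLineFound {
			return frame.Function
		}
		if !more {
			return ""
		}
	}
}

func main() {
	defer func() {
		if r := recover(); r != nil {
			fmt.Println("panic emitted from:", panicOrigin()) // main.main
		}
	}()
	panic("boom")
}
```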
diff --git a/pkg/sql/colexecerror/error_test.go b/pkg/sql/colexecerror/error_test.go
index c69010f5b70f..50bc883c646c 100644
--- a/pkg/sql/colexecerror/error_test.go
+++ b/pkg/sql/colexecerror/error_test.go
@@ -87,40 +87,6 @@ func TestNonCatchablePanicIsNotCaught(t *testing.T) {
})
}
-type testInterface interface {
- foo()
-}
-
-type testImpl1 struct{}
-
-var _ testInterface = &testImpl1{}
-
-func (t testImpl1) foo() {}
-
-type testImpl2 struct{}
-
-var _ testInterface = &testImpl2{}
-
-func (t testImpl2) foo() {}
-
-// TestRuntimePanicIsCaught verifies that if a runtime panic occurs in the
-// safe-to-catch package (which this test package is), then it is converted into
-// an internal error (#133167).
-func TestRuntimePanicIsCaught(t *testing.T) {
- defer leaktest.AfterTest(t)()
- defer log.Scope(t).Close(t)
-
- // Use the release-build panic-catching behavior instead of the
- // crdb_test-build behavior.
- defer colexecerror.ProductionBehaviorForTests()()
-
- require.Error(t, colexecerror.CatchVectorizedRuntimeError(func() {
- // Attempt an invalid interface conversion.
- var o testInterface = &testImpl1{}
- _ = o.(*testImpl2)
- }))
-}
-
// BenchmarkCatchVectorizedRuntimeError measures the time for
// CatchVectorizedRuntimeError to catch and process an error.
func BenchmarkCatchVectorizedRuntimeError(b *testing.B) {
diff --git a/pkg/sql/conn_executor.go b/pkg/sql/conn_executor.go
index f9996e9ce514..2744a9215c0d 100644
--- a/pkg/sql/conn_executor.go
+++ b/pkg/sql/conn_executor.go
@@ -3743,7 +3743,7 @@ func (ex *connExecutor) initEvalCtx(ctx context.Context, evalCtx *extendedEvalCo
// catalog reader, then this function will return an non-zero timestamp
// to use for all read operations.
func (ex *connExecutor) GetPCRReaderTimestamp() hlc.Timestamp {
- if ex.isPCRReaderCatalog {
+ if ex.isPCRReaderCatalog && !ex.sessionData().BypassPCRReaderCatalogAOST {
return ex.server.cfg.LeaseManager.GetSafeReplicationTS()
}
return hlc.Timestamp{}
diff --git a/pkg/sql/delegate/show_ranges.go b/pkg/sql/delegate/show_ranges.go
index 776b1235c959..3cd25320dfb2 100644
--- a/pkg/sql/delegate/show_ranges.go
+++ b/pkg/sql/delegate/show_ranges.go
@@ -707,10 +707,6 @@ all_span_stats AS (
if colinfo.Ranges[i].Name == "lease_holder" {
continue
}
- // Skip the errors column; it's used for internal purposes.
- if colinfo.Ranges[i].Name == "errors" {
- continue
- }
fmt.Fprintf(&buf, ",\n %s", tree.NameString(colinfo.Ranges[i].Name))
}
buf.WriteString(",\n span_stats")
diff --git a/pkg/sql/distsql_plan_stats.go b/pkg/sql/distsql_plan_stats.go
index 5a55e87bdfff..77143217310a 100644
--- a/pkg/sql/distsql_plan_stats.go
+++ b/pkg/sql/distsql_plan_stats.go
@@ -529,12 +529,6 @@ func (dsp *DistSQLPlanner) createStatsPlan(
// Create the table readers; for this we initialize a dummy scanNode.
scan := scanNode{desc: desc}
- if colCfg.wantedColumns == nil {
- // wantedColumns cannot be left nil, and if it is nil at this point,
- // then we only have virtual computed columns, so we'll allocate an
- // empty slice.
- colCfg.wantedColumns = []tree.ColumnID{}
- }
err := scan.initDescDefaults(colCfg)
if err != nil {
return nil, err
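
The deleted guard allocated an empty slice when wantedColumns was nil, which only matters to code that distinguishes nil from empty; the deletion implies the downstream path no longer does. A quick illustration of the nil-versus-empty distinction in Go:

```go
package main

import "fmt"

func main() {
	var nilSlice []int
	emptySlice := []int{}

	fmt.Println(nilSlice == nil, emptySlice == nil)       // true false
	fmt.Println(len(nilSlice) == 0, len(emptySlice) == 0) // true true

	// range and append treat both the same, which is why callers that only
	// iterate or append never needed the explicit allocation.
	nilSlice = append(nilSlice, 1)
	fmt.Println(nilSlice) // [1]
}
```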
diff --git a/pkg/sql/exec_log.go b/pkg/sql/exec_log.go
index 86f167465613..1ecb31dd94ab 100644
--- a/pkg/sql/exec_log.go
+++ b/pkg/sql/exec_log.go
@@ -359,58 +359,56 @@ func (p *planner) maybeLogStatementInternal(
defer releaseSampledQuery(sampledQuery)
*sampledQuery = eventpb.SampledQuery{
- CommonSQLExecDetails: execDetails,
- SkippedQueries: skippedQueries,
- CostEstimate: p.curPlan.instrumentation.costEstimate,
- Distribution: p.curPlan.instrumentation.distribution.String(),
- PlanGist: p.curPlan.instrumentation.planGist.String(),
- SessionID: p.extendedEvalCtx.SessionID.String(),
- Database: p.CurrentDatabase(),
- StatementID: p.stmt.QueryID.String(),
- TransactionID: txnID,
- StatementFingerprintID: stmtFingerprintID.String(),
- MaxFullScanRowsEstimate: p.curPlan.instrumentation.maxFullScanRows,
- TotalScanRowsEstimate: p.curPlan.instrumentation.totalScanRows,
- OutputRowsEstimate: p.curPlan.instrumentation.outputRows,
- StatsAvailable: p.curPlan.instrumentation.statsAvailable,
- NanosSinceStatsCollected: int64(p.curPlan.instrumentation.nanosSinceStatsCollected),
- BytesRead: p.curPlan.instrumentation.topLevelStats.bytesRead,
- RowsRead: p.curPlan.instrumentation.topLevelStats.rowsRead,
- RowsWritten: p.curPlan.instrumentation.topLevelStats.rowsWritten,
- InnerJoinCount: int64(p.curPlan.instrumentation.joinTypeCounts[descpb.InnerJoin]),
- LeftOuterJoinCount: int64(p.curPlan.instrumentation.joinTypeCounts[descpb.LeftOuterJoin]),
- FullOuterJoinCount: int64(p.curPlan.instrumentation.joinTypeCounts[descpb.FullOuterJoin]),
- SemiJoinCount: int64(p.curPlan.instrumentation.joinTypeCounts[descpb.LeftSemiJoin]),
- AntiJoinCount: int64(p.curPlan.instrumentation.joinTypeCounts[descpb.LeftAntiJoin]),
- IntersectAllJoinCount: int64(p.curPlan.instrumentation.joinTypeCounts[descpb.IntersectAllJoin]),
- ExceptAllJoinCount: int64(p.curPlan.instrumentation.joinTypeCounts[descpb.ExceptAllJoin]),
- HashJoinCount: int64(p.curPlan.instrumentation.joinAlgorithmCounts[exec.HashJoin]),
- CrossJoinCount: int64(p.curPlan.instrumentation.joinAlgorithmCounts[exec.CrossJoin]),
- IndexJoinCount: int64(p.curPlan.instrumentation.joinAlgorithmCounts[exec.IndexJoin]),
- LookupJoinCount: int64(p.curPlan.instrumentation.joinAlgorithmCounts[exec.LookupJoin]),
- MergeJoinCount: int64(p.curPlan.instrumentation.joinAlgorithmCounts[exec.MergeJoin]),
- InvertedJoinCount: int64(p.curPlan.instrumentation.joinAlgorithmCounts[exec.InvertedJoin]),
- ApplyJoinCount: int64(p.curPlan.instrumentation.joinAlgorithmCounts[exec.ApplyJoin]),
- ZigZagJoinCount: int64(p.curPlan.instrumentation.joinAlgorithmCounts[exec.ZigZagJoin]),
- ContentionNanos: queryLevelStats.ContentionTime.Nanoseconds(),
- Regions: queryLevelStats.Regions,
- SQLInstanceIDs: queryLevelStats.SQLInstanceIDs,
- KVNodeIDs: queryLevelStats.KVNodeIDs,
- UsedFollowerRead: queryLevelStats.UsedFollowerRead,
- NetworkBytesSent: queryLevelStats.NetworkBytesSent,
- MaxMemUsage: queryLevelStats.MaxMemUsage,
- MaxDiskUsage: queryLevelStats.MaxDiskUsage,
- KVBytesRead: queryLevelStats.KVBytesRead,
- KVPairsRead: queryLevelStats.KVPairsRead,
- KVRowsRead: queryLevelStats.KVRowsRead,
- KvTimeNanos: queryLevelStats.KVTime.Nanoseconds(),
- KvGrpcCalls: queryLevelStats.KVBatchRequestsIssued,
- NetworkMessages: queryLevelStats.NetworkMessages,
- CpuTimeNanos: queryLevelStats.CPUTime.Nanoseconds(),
- IndexRecommendations: indexRecs,
- // TODO(mgartner): Use a slice of struct{uint64, uint64} instead of
- // converting to strings.
- Indexes: p.curPlan.instrumentation.indexesUsed.Strings(),
+ CommonSQLExecDetails: execDetails,
+ SkippedQueries: skippedQueries,
+ CostEstimate: p.curPlan.instrumentation.costEstimate,
+ Distribution: p.curPlan.instrumentation.distribution.String(),
+ PlanGist: p.curPlan.instrumentation.planGist.String(),
+ SessionID: p.extendedEvalCtx.SessionID.String(),
+ Database: p.CurrentDatabase(),
+ StatementID: p.stmt.QueryID.String(),
+ TransactionID: txnID,
+ StatementFingerprintID: stmtFingerprintID.String(),
+ MaxFullScanRowsEstimate: p.curPlan.instrumentation.maxFullScanRows,
+ TotalScanRowsEstimate: p.curPlan.instrumentation.totalScanRows,
+ OutputRowsEstimate: p.curPlan.instrumentation.outputRows,
+ StatsAvailable: p.curPlan.instrumentation.statsAvailable,
+ NanosSinceStatsCollected: int64(p.curPlan.instrumentation.nanosSinceStatsCollected),
+ BytesRead: p.curPlan.instrumentation.topLevelStats.bytesRead,
+ RowsRead: p.curPlan.instrumentation.topLevelStats.rowsRead,
+ RowsWritten: p.curPlan.instrumentation.topLevelStats.rowsWritten,
+ InnerJoinCount: int64(p.curPlan.instrumentation.joinTypeCounts[descpb.InnerJoin]),
+ LeftOuterJoinCount: int64(p.curPlan.instrumentation.joinTypeCounts[descpb.LeftOuterJoin]),
+ FullOuterJoinCount: int64(p.curPlan.instrumentation.joinTypeCounts[descpb.FullOuterJoin]),
+ SemiJoinCount: int64(p.curPlan.instrumentation.joinTypeCounts[descpb.LeftSemiJoin]),
+ AntiJoinCount: int64(p.curPlan.instrumentation.joinTypeCounts[descpb.LeftAntiJoin]),
+ IntersectAllJoinCount: int64(p.curPlan.instrumentation.joinTypeCounts[descpb.IntersectAllJoin]),
+ ExceptAllJoinCount: int64(p.curPlan.instrumentation.joinTypeCounts[descpb.ExceptAllJoin]),
+ HashJoinCount: int64(p.curPlan.instrumentation.joinAlgorithmCounts[exec.HashJoin]),
+ CrossJoinCount: int64(p.curPlan.instrumentation.joinAlgorithmCounts[exec.CrossJoin]),
+ IndexJoinCount: int64(p.curPlan.instrumentation.joinAlgorithmCounts[exec.IndexJoin]),
+ LookupJoinCount: int64(p.curPlan.instrumentation.joinAlgorithmCounts[exec.LookupJoin]),
+ MergeJoinCount: int64(p.curPlan.instrumentation.joinAlgorithmCounts[exec.MergeJoin]),
+ InvertedJoinCount: int64(p.curPlan.instrumentation.joinAlgorithmCounts[exec.InvertedJoin]),
+ ApplyJoinCount: int64(p.curPlan.instrumentation.joinAlgorithmCounts[exec.ApplyJoin]),
+ ZigZagJoinCount: int64(p.curPlan.instrumentation.joinAlgorithmCounts[exec.ZigZagJoin]),
+ ContentionNanos: queryLevelStats.ContentionTime.Nanoseconds(),
+ Regions: queryLevelStats.Regions,
+ SQLInstanceIDs: queryLevelStats.SQLInstanceIDs,
+ KVNodeIDs: queryLevelStats.KVNodeIDs,
+ UsedFollowerRead: queryLevelStats.UsedFollowerRead,
+ NetworkBytesSent: queryLevelStats.NetworkBytesSent,
+ MaxMemUsage: queryLevelStats.MaxMemUsage,
+ MaxDiskUsage: queryLevelStats.MaxDiskUsage,
+ KVBytesRead: queryLevelStats.KVBytesRead,
+ KVPairsRead: queryLevelStats.KVPairsRead,
+ KVRowsRead: queryLevelStats.KVRowsRead,
+ KvTimeNanos: queryLevelStats.KVTime.Nanoseconds(),
+ KvGrpcCalls: queryLevelStats.KVBatchRequestsIssued,
+ NetworkMessages: queryLevelStats.NetworkMessages,
+ CpuTimeNanos: queryLevelStats.CPUTime.Nanoseconds(),
+ IndexRecommendations: indexRecs,
+ Indexes: p.curPlan.instrumentation.indexesUsed,
ScanCount: int64(p.curPlan.instrumentation.scanCounts[exec.ScanCount]),
ScanWithStatsCount: int64(p.curPlan.instrumentation.scanCounts[exec.ScanWithStatsCount]),
ScanWithStatsForecastCount: int64(p.curPlan.instrumentation.scanCounts[exec.ScanWithStatsForecastCount]),
diff --git a/pkg/sql/exec_util.go b/pkg/sql/exec_util.go
index 7a067fa968cf..b1ab34b38265 100644
--- a/pkg/sql/exec_util.go
+++ b/pkg/sql/exec_util.go
@@ -3888,6 +3888,10 @@ func (m *sessionDataMutator) SetOptimizerPushLimitIntoProjectFilteredScan(val bo
m.data.OptimizerPushLimitIntoProjectFilteredScan = val
}
+func (m *sessionDataMutator) SetBypassPCRReaderCatalogAOST(val bool) {
+ m.data.BypassPCRReaderCatalogAOST = val
+}
+
// Utility functions related to scrubbing sensitive information on SQL Stats.
// quantizeCounts ensures that the Count field in the
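
The new setter above follows the sessionDataMutator convention: every session variable gets a tiny mutator method so SET statements, defaults, and resets share one write path. A sketch of that convention with stand-in types (not the actual pkg/sql plumbing):

    package main

    import "fmt"

    // Stand-in session data; the real struct lives in sessiondata.
    type sessionData struct{ bypassPCRReaderCatalogAOST bool }

    type mutator struct{ data *sessionData }

    // setBypassPCRReaderCatalogAOST mirrors the mutator added above.
    func (m *mutator) setBypassPCRReaderCatalogAOST(val bool) {
        m.data.bypassPCRReaderCatalogAOST = val
    }

    func main() {
        sd := &sessionData{}
        (&mutator{data: sd}).setBypassPCRReaderCatalogAOST(true)
        fmt.Println(sd.bypassPCRReaderCatalogAOST) // true
    }
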
diff --git a/pkg/sql/execinfrapb/processors_bulk_io.proto b/pkg/sql/execinfrapb/processors_bulk_io.proto
index 4c2477639b2f..41c51217ca4c 100644
--- a/pkg/sql/execinfrapb/processors_bulk_io.proto
+++ b/pkg/sql/execinfrapb/processors_bulk_io.proto
@@ -438,7 +438,8 @@ message GenerativeSplitAndScatterSpec {
// Spans is the required spans in the restore.
repeated roachpb.Span spans = 10 [(gogoproto.nullable) = false];
repeated jobs.jobspb.RestoreDetails.BackupLocalityInfo backup_locality_info = 11 [(gogoproto.nullable) = false];
- reserved 12;
+ // HighWater is the high watermark of the previous run of restore.
+ optional bytes high_water = 12;
// User who initiated the restore.
optional string user_proto = 13 [(gogoproto.nullable) = false, (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/security/username.SQLUsernameProto"];
// ChunkSize is the number of import spans per chunk.
@@ -450,7 +451,7 @@ message GenerativeSplitAndScatterSpec {
// NumNodes is the number of nodes available for dist restore.
optional int64 num_nodes = 17[(gogoproto.nullable) = false];
optional int64 job_id = 18 [(gogoproto.nullable) = false, (gogoproto.customname) = "JobID"];
- reserved 20 ;
+ optional bool use_frontier_checkpointing = 20 [(gogoproto.nullable) = false];
repeated jobs.jobspb.RestoreProgress.FrontierEntry checkpointed_spans = 21 [(gogoproto.nullable) = false];
// ExclusiveFileSpanComparison is true if the backup can safely use
// exclusive file span comparison.
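
The two previously reserved proto fields are revived here: a legacy single high-water checkpoint alongside a flag selecting frontier-based checkpointing over span entries. A hedged Go sketch of how a consumer might branch on them; the field and function names are illustrative, not the actual restore code:

    package main

    import "fmt"

    type span struct{ start, end string }

    // spec mirrors the two checkpointing fields re-introduced above;
    // names here are illustrative only.
    type spec struct {
        useFrontierCheckpointing bool
        checkpointedSpans        []span // frontier-style progress entries
        highWater                []byte // legacy single high-water key
    }

    func resumeFrom(s spec) string {
        if s.useFrontierCheckpointing {
            return fmt.Sprintf("resume from %d checkpointed spans", len(s.checkpointedSpans))
        }
        return fmt.Sprintf("resume above high water %q", s.highWater)
    }

    func main() {
        fmt.Println(resumeFrom(spec{useFrontierCheckpointing: true,
            checkpointedSpans: []span{{"a", "c"}}}))
        fmt.Println(resumeFrom(spec{highWater: []byte("b")}))
    }
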
diff --git a/pkg/sql/executor_statement_metrics.go b/pkg/sql/executor_statement_metrics.go
index d2d7d302ea81..5056ec6559be 100644
--- a/pkg/sql/executor_statement_metrics.go
+++ b/pkg/sql/executor_statement_metrics.go
@@ -208,10 +208,8 @@ func (ex *connExecutor) recordStatementSummary(
EndTime: phaseTimes.GetSessionPhaseTime(sessionphase.PlannerStartExecStmt).Add(svcLatRaw),
FullScan: fullScan,
ExecStats: queryLevelStats,
- // TODO(mgartner): Use a slice of struct{uint64, uint64} instead of
- // converting to strings.
- Indexes: planner.instrumentation.indexesUsed.Strings(),
- Database: planner.SessionData().Database,
+ Indexes: planner.instrumentation.indexesUsed,
+ Database: planner.SessionData().Database,
}
stmtFingerprintID, err :=
diff --git a/pkg/sql/instrumentation.go b/pkg/sql/instrumentation.go
index 4f8eaca6af36..e6c76d858f1a 100644
--- a/pkg/sql/instrumentation.go
+++ b/pkg/sql/instrumentation.go
@@ -26,7 +26,6 @@ import (
"github.com/cockroachdb/cockroach/pkg/sql/idxrecommendations"
"github.com/cockroachdb/cockroach/pkg/sql/isql"
"github.com/cockroachdb/cockroach/pkg/sql/opt/exec"
- "github.com/cockroachdb/cockroach/pkg/sql/opt/exec/execbuilder"
"github.com/cockroachdb/cockroach/pkg/sql/opt/exec/explain"
"github.com/cockroachdb/cockroach/pkg/sql/opt/indexrec"
"github.com/cockroachdb/cockroach/pkg/sql/opt/optbuilder"
@@ -226,7 +225,7 @@ type instrumentationHelper struct {
scanCounts [exec.NumScanCountTypes]int
// indexesUsed lists the indexes used in the query with format tableID@indexID.
- indexesUsed execbuilder.IndexesUsed
+ indexesUsed []string
// schemachangerMode indicates which schema changer mode was used to execute
// the query.
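
With this change the instrumentation helper stores the pre-formatted []string directly, so the logging and metrics call sites above drop their Strings() conversion. A small sketch of the before/after shape, with hypothetical stand-ins for execbuilder.IndexesUsed:

    package main

    import "fmt"

    // indexUse is a stand-in for the prior struct-based representation.
    type indexUse struct{ tableID, indexID uint64 }

    // toStrings mirrors the conversion call sites used to perform.
    func toStrings(us []indexUse) []string {
        out := make([]string, len(us))
        for i, u := range us {
            out[i] = fmt.Sprintf("%d@%d", u.tableID, u.indexID)
        }
        return out
    }

    func main() {
        // Before: convert at every logging/metrics call site.
        fmt.Println(toStrings([]indexUse{{52, 1}, {52, 2}})) // [52@1 52@2]
        // After: the helper already holds "tableID@indexID" strings,
        // so call sites just copy the field.
        indexesUsed := []string{"52@1", "52@2"}
        fmt.Println(indexesUsed)
    }
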
diff --git a/pkg/sql/inverted/expression.go b/pkg/sql/inverted/expression.go
index c87de68999ae..1a88cc71976a 100644
--- a/pkg/sql/inverted/expression.go
+++ b/pkg/sql/inverted/expression.go
@@ -167,7 +167,7 @@ func formatSpan(span Span, redactable bool) string {
}
output := fmt.Sprintf("[%s, %s%c", start, end, spanEndOpenOrClosed)
if redactable {
- output = string(redact.Sprintf("%s", encoding.Unsafe(output)))
+ output = string(redact.Sprintf("%s", redact.Unsafe(output)))
}
return output
}
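
The fix swaps encoding.Unsafe for redact.Unsafe, marking the span bounds as unredactable content via the redact package itself. A minimal usage sketch with github.com/cockroachdb/redact (output shown in a comment is approximate):

    package main

    import (
        "fmt"

        "github.com/cockroachdb/redact"
    )

    func main() {
        // redact.Unsafe forces its argument to be treated as unsafe, so it
        // is wrapped in redaction markers inside the redactable string even
        // if the value's type would otherwise be considered safe.
        s := redact.Sprintf("span: %s", redact.Unsafe("[/Table/1, /Table/2)"))
        fmt.Println(string(s)) // e.g. span: ‹[/Table/1, /Table/2)›
    }
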
diff --git a/pkg/sql/logictest/logic.go b/pkg/sql/logictest/logic.go
index de27937a74cb..0d8ffea26318 100644
--- a/pkg/sql/logictest/logic.go
+++ b/pkg/sql/logictest/logic.go
@@ -1425,7 +1425,9 @@ func (t *logicTest) handleWaitForInitErr(ts testserver.TestServer, err error) {
if walkErr != nil {
t.t().Logf("error while walking logs directory: %v", walkErr)
} else if foundSnappyErr {
- ts.Stop()
+ if ts != nil {
+ ts.Stop()
+ }
t.t().Skip("ignoring init did not finish for node error due to snappy error")
}
}
diff --git a/pkg/sql/logictest/testdata/logic_test/alter_column_type b/pkg/sql/logictest/testdata/logic_test/alter_column_type
index 87916bbcb56f..26cdd70d9078 100644
--- a/pkg/sql/logictest/testdata/logic_test/alter_column_type
+++ b/pkg/sql/logictest/testdata/logic_test/alter_column_type
@@ -265,12 +265,17 @@ INSERT INTO t6 VALUES (1), (2), (3);
statement ok
ALTER TABLE t6 ALTER COLUMN id2 TYPE STRING;
-query TTBTTTB rowsort
-SHOW COLUMNS FROM t6
+query TT
+SHOW CREATE TABLE t6
----
-id INT8 true NULL · {t6_pkey} false
-id2 STRING true NULL · {t6_pkey} false
-rowid INT8 false unique_rowid() · {t6_pkey} true
+t6 CREATE TABLE public.t6 (
+ id INT8 NULL,
+ id2 STRING NULL,
+ rowid INT8 NOT VISIBLE NOT NULL DEFAULT unique_rowid(),
+ CONSTRAINT t6_pkey PRIMARY KEY (rowid ASC),
+ FAMILY f1 (id, rowid),
+ FAMILY f2 (id2)
+ )
# Ensure the type of the default column is checked
statement ok
@@ -295,7 +300,7 @@ INSERT INTO t8 VALUES ('hello')
statement error pq: column "x" cannot be cast automatically to type INT8\nHINT: You might need to specify "USING x::INT8".
ALTER TABLE t8 ALTER COLUMN x TYPE INT
-statement error .*could not parse "hello" as type int: strconv.ParseInt: parsing "hello": invalid syntax
+statement error pq: could not parse "hello" as type int: strconv.ParseInt: parsing "hello": invalid syntax
ALTER TABLE t8 ALTER COLUMN x TYPE INT USING x::INT8
query TT
@@ -437,36 +442,15 @@ ROLLBACK
statement ok
CREATE TABLE t21 (x INT);
-statement ok
-INSERT INTO t21 VALUES (888),(-32760);
-
statement error pq: unimplemented: ALTER COLUMN TYPE cannot be used in combination with other ALTER TABLE commands
ALTER TABLE t21 ALTER COLUMN x TYPE STRING, ALTER COLUMN x SET NOT NULL;
-statement error pq: unimplemented: ALTER COLUMN TYPE cannot be used in combination with other ALTER TABLE commands
-ALTER TABLE t21 ALTER COLUMN x TYPE VARCHAR(30), ALTER COLUMN x SET NOT VISIBLE;
-
-query TTBTTTB rowsort
-SHOW COLUMNS FROM t21
-----
-x INT8 true NULL · {t21_pkey} false
-rowid INT8 false unique_rowid() · {t21_pkey} true
-
statement ok
CREATE TABLE t22 (x INT);
-statement ok
-INSERT INTO t22 VALUES (0),(-5);
-
statement error pq: unimplemented: ALTER COLUMN TYPE cannot be used in combination with other ALTER TABLE commands
ALTER TABLE t22 ALTER COLUMN x SET NOT NULL, ALTER COLUMN x TYPE STRING;
-query TTBTTTB rowsort
-SHOW COLUMNS FROM t22
-----
-x INT8 true NULL · {t22_pkey} false
-rowid INT8 false unique_rowid() · {t22_pkey} true
-
# Ensure ALTER COLUMN TYPE USING EXPRESSION works.
statement ok
CREATE TABLE t23 (x INT);
@@ -493,7 +477,7 @@ CREATE TABLE t24 (x STRING);
statement ok
INSERT INTO t24 VALUES ('1'), ('hello');
-statement error .*could not parse "hello" as type int: strconv.ParseInt: parsing "hello": invalid syntax
+statement error pq: could not parse "hello" as type int: strconv.ParseInt: parsing "hello": invalid syntax
ALTER TABLE t24 ALTER COLUMN x TYPE INT USING (x::int + 5)
query TT colnames
@@ -601,7 +585,7 @@ INSERT INTO t30 VALUES (e'a\\01');
statement error pq: column "x" cannot be cast automatically to type BYTES\nHINT: You might need to specify "USING x::BYTES".
ALTER TABLE t30 ALTER COLUMN x TYPE BYTES
-statement error .*could not parse "a\\\\01" as type bytes: bytea encoded value ends with incomplete escape sequence
+statement error pq: could not parse "a\\\\01" as type bytes: bytea encoded value ends with incomplete escape sequence
ALTER TABLE t30 ALTER COLUMN x TYPE BYTES USING x::BYTES
# Ensure that dependent views prevent column type modification.
@@ -957,7 +941,7 @@ ALTER TABLE t_bytes ALTER COLUMN c2 SET DATA TYPE CHAR(4);
statement error pq: column "c3" cannot be cast automatically to type UUID\nHINT: You might need to specify "USING c3::UUID".
ALTER TABLE t_bytes ALTER COLUMN c3 SET DATA TYPE UUID;
-statement error .*could not parse "worldhello" as type uuid: uuid: UUID must be exactly 16 bytes long, got 10 bytes
+statement error pq: could not parse "worldhello" as type uuid: uuid: UUID must be exactly 16 bytes long, got 10 bytes
ALTER TABLE t_bytes ALTER COLUMN c3 SET DATA TYPE UUID USING c3::UUID;
statement ok
@@ -977,13 +961,17 @@ SELECT c2,c3 FROM t_bytes ORDER BY c1;
w NULL
w 3b5692c8-0f73-49ec-9186-8f1478f3064a
-query TTBTTTB rowsort
-SHOW COLUMNS FROM t_bytes;
+query TT
+SHOW CREATE TABLE t_bytes;
----
-c1 STRING true NULL · {t_bytes_pkey} false
-c2 CHAR(4) true NULL · {t_bytes_pkey} false
-c3 UUID true NULL · {t_bytes_pkey} false
-rowid INT8 false unique_rowid() · {t_bytes_pkey} true
+t_bytes CREATE TABLE public.t_bytes (
+ c1 STRING NULL,
+ c2 CHAR(4) NULL,
+ c3 UUID NULL,
+ rowid INT8 NOT VISIBLE NOT NULL DEFAULT unique_rowid(),
+ CONSTRAINT t_bytes_pkey PRIMARY KEY (rowid ASC),
+ FAMILY f1 (c1, c2, c3, rowid)
+ )
statement ok
DROP TABLE t_bytes;
@@ -1041,12 +1029,16 @@ NULL NULL
10012.34 4563.21
12345.60 1.23
-query TTBTTTB rowsort
-SHOW COLUMNS FROM t_decimal;
+query TT
+SHOW CREATE TABLE t_decimal;
----
-c1 DECIMAL(7,2) true NULL · {t_decimal_pkey} false
-c2 DECIMAL(10,2) true NULL · {t_decimal_pkey} false
-rowid INT8 false unique_rowid() · {t_decimal_pkey} true
+t_decimal CREATE TABLE public.t_decimal (
+ c1 DECIMAL(7,2) NULL,
+ c2 DECIMAL(10,2) NULL,
+ rowid INT8 NOT VISIBLE NOT NULL DEFAULT unique_rowid(),
+ CONSTRAINT t_decimal_pkey PRIMARY KEY (rowid ASC),
+ FAMILY f1 (c1, c2, rowid)
+ )
statement ok
DROP TABLE t_decimal;
@@ -1155,15 +1147,19 @@ SELECT c1,c2,c3,c4,c5 FROM t_bit_string ORDER BY pk;
1010 1010 hello worl worldh
NULL NULL NULL NULL NULL
-query TTBTTTB rowsort
-SHOW COLUMNS FROM t_bit_string;
+query TT
+SHOW CREATE TABLE t_bit_string;
----
-c1 BIT(4) true NULL · {t_bit_string_pkey} false
-c2 VARBIT(4) true NULL · {t_bit_string_pkey} false
-c3 BYTES true NULL · {t_bit_string_pkey} false
-c4 VARCHAR(4) true NULL · {t_bit_string_pkey} false
-c5 CHAR(6) true NULL · {t_bit_string_pkey} false
-pk INT8 false NULL · {t_bit_string_pkey} false
+t_bit_string CREATE TABLE public.t_bit_string (
+ pk INT8 NOT NULL,
+ c1 BIT(4) NULL,
+ c2 VARBIT(4) NULL,
+ c3 BYTES NULL,
+ c4 VARCHAR(4) NULL,
+ c5 CHAR(6) NULL,
+ CONSTRAINT t_bit_string_pkey PRIMARY KEY (pk ASC),
+ FAMILY f1 (pk, c1, c2, c3, c4, c5)
+ )
statement ok
DROP TABLE t_bit_string;
@@ -1206,11 +1202,15 @@ SELECT c1 FROM t_int ORDER BY pk;
32767
NULL
-query TTBTTTB rowsort
-SHOW COLUMNS FROM t_int;
+query TT
+SHOW CREATE TABLE t_int;
----
-c1 INT2 true NULL · {t_int_pkey} false
-pk INT8 false NULL · {t_int_pkey} false
+t_int CREATE TABLE public.t_int (
+ pk INT8 NOT NULL,
+ c1 INT2 NULL,
+ CONSTRAINT t_int_pkey PRIMARY KEY (pk ASC),
+ FAMILY f1 (pk, c1)
+ )
statement ok
DROP TABLE t_int;
@@ -1232,95 +1232,4 @@ alter table roach_village alter column age set data type bigint;
statement ok
alter table roach_village alter column legs set data type bigint;
-subtest convert_column_many
-
-statement ok
-create table t_many (c1 smallint);
-
-statement ok
-insert into t_many values (0),(100),(-32);
-
-# Without USING but auto cast works
-statement ok
-alter table t_many alter column c1 set data type text;
-
-query T rowsort
-SELECT * FROM t_many;
-----
-0
-100
--32
-
-query TTBTTTB rowsort
-SHOW COLUMNS FROM t_many;
-----
-c1 STRING true NULL · {t_many_pkey} false
-rowid INT8 false unique_rowid() · {t_many_pkey} true
-
-# STRING -> SMALLINT with USING
-statement ok
-alter table t_many alter column c1 set data type smallint using c1::smallint;
-
-query I rowsort
-SELECT * FROM t_many;
-----
-0
-100
--32
-
-query TTBTTTB rowsort
-SHOW COLUMNS FROM t_many;
-----
-c1 INT2 true NULL · {t_many_pkey} false
-rowid INT8 false unique_rowid() · {t_many_pkey} true
-
-statement error pq: unimplemented: ALTER COLUMN TYPE cannot be used in combination with other ALTER TABLE commands
-alter table t_many alter column c1 set not null, alter column c1 set data type VARCHAR(10) using concat(c1::text, 'boo');
-
-statement ok
-alter table t_many alter column c1 set data type VARCHAR(10) using concat(c1::text, 'boo');
-
-query T rowsort
-SELECT c1 FROM t_many;
-----
-0boo
-100boo
--32boo
-
-query TTBTTTB rowsort
-SHOW COLUMNS FROM t_many;
-----
-c1 VARCHAR(10) true NULL · {t_many_pkey} false
-rowid INT8 false unique_rowid() · {t_many_pkey} true
-
-# VARCHAR(10) -> INT4 attempt, but fail because of USING expression failure
-statement error .*could not parse "" as type int: strconv.ParseInt: parsing ""
-alter table t_many alter column c1 set data type int4 using trim('boo', c1)::int4;
-
-query TTBTTTB rowsort
-SHOW COLUMNS FROM t_many;
-----
-c1 VARCHAR(10) true NULL · {t_many_pkey} false
-rowid INT8 false unique_rowid() · {t_many_pkey} true
-
-# Attempt again but with expression fixed.
-statement ok
-alter table t_many alter column c1 set data type int4 using trim(trailing 'boo' from c1)::int4;
-
-query I rowsort
-SELECT * FROM t_many;
-----
-0
-100
--32
-
-query TTBTTTB rowsort
-SHOW COLUMNS FROM t_many;
-----
-c1 INT4 true NULL · {t_many_pkey} false
-rowid INT8 false unique_rowid() · {t_many_pkey} true
-
-statement ok
-DROP TABLE t_many;
-
subtest end
diff --git a/pkg/sql/logictest/testdata/logic_test/collatedstring b/pkg/sql/logictest/testdata/logic_test/collatedstring
index 8cae998585a2..a50d87fce7a7 100644
--- a/pkg/sql/logictest/testdata/logic_test/collatedstring
+++ b/pkg/sql/logictest/testdata/logic_test/collatedstring
@@ -1,8 +1,10 @@
statement error pq: invalid locale bad_locale: language: subtag "locale" is well-formed but unknown
SELECT 'a' COLLATE bad_locale
-statement error pq: unsupported comparison operator: =
+query B
SELECT 'A' COLLATE en = 'a'
+----
+false
statement error pq: unsupported comparison operator: =
SELECT 'A' COLLATE en = 'a' COLLATE de
@@ -560,3 +562,51 @@ SELECT * FROM t45142 WHERE c < SOME ('' COLLATE en, '' COLLATE de);
statement ok
SELECT * FROM t45142 WHERE c < SOME (CASE WHEN true THEN NULL END, '' COLLATE en);
SELECT * FROM t45142 WHERE c < SOME ('' COLLATE en, CASE WHEN true THEN NULL END);
+
+subtest issue_132867
+
+statement ok
+CREATE TABLE test_collate (
+ id INT8 PRIMARY KEY,
+ "string_field" STRING COLLATE en_US_u_ks_level2 NULL
+)
+
+statement ok
+INSERT INTO test_collate VALUES (1, 'Str_Collate_1')
+
+query IT
+SELECT * FROM test_collate WHERE (("id", "string_field")) = ANY(Array[(1, 'str_collate_1')])
+----
+1 Str_Collate_1
+
+query I
+SELECT id FROM test_collate WHERE "string_field" = 'sTR_cOLLATE_1'
+----
+1
+
+statement ok
+INSERT INTO test_collate VALUES (2, 'Foo'), (3, 'Bar'), (4, 'Baz')
+
+query T
+SELECT string_field FROM test_collate WHERE string_field < 'baz' ORDER BY id
+----
+Bar
+
+query T
+SELECT string_field FROM test_collate WHERE string_field <= 'baz' ORDER BY id
+----
+Bar
+Baz
+
+query T
+SELECT string_field FROM test_collate WHERE string_field > 'baz' ORDER BY id
+----
+Str_Collate_1
+Foo
+
+query T
+SELECT string_field FROM test_collate WHERE string_field >= 'baz' ORDER BY id
+----
+Str_Collate_1
+Foo
+Baz
diff --git a/pkg/sql/logictest/testdata/logic_test/crdb_internal b/pkg/sql/logictest/testdata/logic_test/crdb_internal
index 7cced2914642..6b9001950c3a 100644
--- a/pkg/sql/logictest/testdata/logic_test/crdb_internal
+++ b/pkg/sql/logictest/testdata/logic_test/crdb_internal
@@ -533,10 +533,10 @@ SELECT * FROM crdb_internal.node_inflight_trace_spans WHERE span_id < 0
----
trace_id parent_span_id span_id goroutine_id finished start_time duration operation
-query ITTTTITTTTTTIT colnames
+query ITTTTITTTTTTI colnames
SELECT * FROM crdb_internal.ranges WHERE range_id < 0
----
-range_id start_key start_pretty end_key end_pretty replicas replica_localities voting_replicas non_voting_replicas learner_replicas split_enforced_until lease_holder range_size errors
+range_id start_key start_pretty end_key end_pretty replicas replica_localities voting_replicas non_voting_replicas learner_replicas split_enforced_until lease_holder range_size
query ITTTTTTTTTT colnames
SELECT * FROM crdb_internal.ranges_no_leases WHERE range_id < 0
diff --git a/pkg/sql/logictest/testdata/logic_test/crdb_internal_catalog b/pkg/sql/logictest/testdata/logic_test/crdb_internal_catalog
index a599c130919b..fbd89d79a784 100644
--- a/pkg/sql/logictest/testdata/logic_test/crdb_internal_catalog
+++ b/pkg/sql/logictest/testdata/logic_test/crdb_internal_catalog
@@ -105,7 +105,7 @@ skipif config local-mixed-24.2
query IT
SELECT id, strip_volatile(descriptor) FROM crdb_internal.kv_catalog_descriptor ORDER BY id
----
-1 {"database": {"id": 1, "name": "system", "privileges": {"ownerProto": "node", "users": [{"privileges": "2048", "userProto": "admin", "withGrantOption": "2048"}, {"privileges": "2048", "userProto": "root", "withGrantOption": "2048"}], "version": 3}, "systemDatabaseSchemaVersion": {"internal": 22, "majorVal": 1000024, "minorVal": 2}, "version": "1"}}
+1 {"database": {"id": 1, "name": "system", "privileges": {"ownerProto": "node", "users": [{"privileges": "2048", "userProto": "admin", "withGrantOption": "2048"}, {"privileges": "2048", "userProto": "root", "withGrantOption": "2048"}], "version": 3}, "systemDatabaseSchemaVersion": {"internal": 22, "majorVal": 24, "minorVal": 2}, "version": "1"}}
3 {"table": {"columns": [{"id": 1, "name": "id", "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 2, "name": "descriptor", "nullable": true, "type": {"family": "BytesFamily", "oid": 17}}], "formatVersion": 3, "id": 3, "name": "descriptor", "nextColumnId": 3, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "parentId": 1, "primaryIndex": {"constraintId": 1, "encodingType": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "keyColumnDirections": ["ASC"], "keyColumnIds": [1], "keyColumnNames": ["id"], "name": "primary", "partitioning": {}, "sharded": {}, "storeColumnIds": [2], "storeColumnNames": ["descriptor"], "unique": true, "version": 4}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "admin", "withGrantOption": "32"}, {"privileges": "32", "userProto": "root", "withGrantOption": "32"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 29, "version": "1"}}
4 {"table": {"columns": [{"id": 1, "name": "username", "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "hashedPassword", "nullable": true, "type": {"family": "BytesFamily", "oid": 17}}, {"defaultExpr": "false", "id": 3, "name": "isRole", "type": {"oid": 16}}, {"id": 4, "name": "user_id", "type": {"family": "OidFamily", "oid": 26}}], "formatVersion": 3, "id": 4, "indexes": [{"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 2, "interleave": {}, "keyColumnDirections": ["ASC"], "keyColumnIds": [4], "keyColumnNames": ["user_id"], "keySuffixColumnIds": [1], "name": "users_user_id_idx", "partitioning": {}, "sharded": {}, "unique": true, "version": 3}], "name": "users", "nextColumnId": 5, "nextConstraintId": 3, "nextIndexId": 3, "nextMutationId": 1, "parentId": 1, "primaryIndex": {"constraintId": 2, "encodingType": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "keyColumnDirections": ["ASC"], "keyColumnIds": [1], "keyColumnNames": ["username"], "name": "primary", "partitioning": {}, "sharded": {}, "storeColumnIds": [2, 3, 4], "storeColumnNames": ["hashedPassword", "isRole", "user_id"], "unique": true, "version": 4}, "privileges": {"ownerProto": "node", "users": [{"privileges": "480", "userProto": "admin", "withGrantOption": "480"}, {"privileges": "480", "userProto": "root", "withGrantOption": "480"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 29, "version": "2"}}
5 {"table": {"columns": [{"id": 1, "name": "id", "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 2, "name": "config", "nullable": true, "type": {"family": "BytesFamily", "oid": 17}}], "formatVersion": 3, "id": 5, "name": "zones", "nextColumnId": 3, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "parentId": 1, "primaryIndex": {"constraintId": 1, "encodingType": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "keyColumnDirections": ["ASC"], "keyColumnIds": [1], "keyColumnNames": ["id"], "name": "primary", "partitioning": {}, "sharded": {}, "storeColumnIds": [2], "storeColumnNames": ["config"], "unique": true, "version": 4}, "privileges": {"ownerProto": "node", "users": [{"privileges": "480", "userProto": "admin", "withGrantOption": "480"}, {"privileges": "480", "userProto": "root", "withGrantOption": "480"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 29, "version": "1"}}
@@ -450,7 +450,7 @@ SELECT id, strip_volatile(descriptor) FROM crdb_internal.kv_catalog_descriptor O
4294967233 {"table": {"columns": [{"id": 1, "name": "span_idx", "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 2, "name": "message_idx", "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 3, "name": "timestamp", "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 4, "name": "duration", "nullable": true, "type": {"family": "IntervalFamily", "intervalDurationField": {}, "oid": 1186}}, {"id": 5, "name": "operation", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "loc", "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "tag", "type": {"family": "StringFamily", "oid": 25}}, {"id": 8, "name": "message", "type": {"family": "StringFamily", "oid": 25}}, {"id": 9, "name": "age", "type": {"family": "IntervalFamily", "intervalDurationField": {}, "oid": 1186}}], "formatVersion": 3, "id": 4294967233, "name": "session_trace", "nextColumnId": 10, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967295, "version": "1"}}
4294967234 {"table": {"columns": [{"id": 1, "name": "table_id", "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 2, "name": "parent_id", "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 3, "name": "name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "type", "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "target_id", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 6, "name": "target_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "state", "type": {"family": "StringFamily", "oid": 25}}, {"id": 8, "name": "direction", "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967234, "name": "schema_changes", "nextColumnId": 9, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967295, "version": "1"}}
4294967235 {"table": {"columns": [{"id": 1, "name": "node_id", "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 2, "name": "component", "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "field", "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "value", "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967235, "name": "node_runtime_info", "nextColumnId": 5, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967295, "version": "1"}}
-4294967236 {"table": {"columns": [{"id": 1, "name": "range_id", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 2, "name": "start_key", "nullable": true, "type": {"family": "BytesFamily", "oid": 17}}, {"id": 3, "name": "start_pretty", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "end_key", "nullable": true, "type": {"family": "BytesFamily", "oid": 17}}, {"id": 5, "name": "end_pretty", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "replicas", "nullable": true, "type": {"arrayContents": {"family": "IntFamily", "oid": 20, "width": 64}, "arrayElemType": "IntFamily", "family": "ArrayFamily", "oid": 1016, "width": 64}}, {"id": 7, "name": "replica_localities", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}, {"id": 8, "name": "voting_replicas", "nullable": true, "type": {"arrayContents": {"family": "IntFamily", "oid": 20, "width": 64}, "arrayElemType": "IntFamily", "family": "ArrayFamily", "oid": 1016, "width": 64}}, {"id": 9, "name": "non_voting_replicas", "nullable": true, "type": {"arrayContents": {"family": "IntFamily", "oid": 20, "width": 64}, "arrayElemType": "IntFamily", "family": "ArrayFamily", "oid": 1016, "width": 64}}, {"id": 10, "name": "learner_replicas", "nullable": true, "type": {"arrayContents": {"family": "IntFamily", "oid": 20, "width": 64}, "arrayElemType": "IntFamily", "family": "ArrayFamily", "oid": 1016, "width": 64}}, {"id": 11, "name": "split_enforced_until", "nullable": true, "type": {"family": "TimestampFamily", "oid": 1114}}, {"id": 12, "name": "lease_holder", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 13, "name": "range_size", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 14, "name": "errors", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967236, "name": "ranges", "nextColumnId": 15, "nextConstraintId": 1, "nextMutationId": 1, "primaryIndex": {"foreignKey": {}, "geoConfig": {}, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967295, "version": "1", "viewQuery": "SELECT range_id, start_key, start_pretty, end_key, end_pretty, replicas, replica_localities, voting_replicas, non_voting_replicas, learner_replicas, split_enforced_until, (crdb_internal.lease_holder_with_errors(start_key)->>'Leaseholder')::INT8 AS lease_holder, ((((crdb_internal.range_stats_with_errors(start_key)->'RangeStats')->>'key_bytes')::INT8 + ((crdb_internal.range_stats_with_errors(start_key)->'RangeStats')->>'val_bytes')::INT8) + COALESCE(((crdb_internal.range_stats_with_errors(start_key)->'RangeStats')->>'range_key_bytes')::INT8, 0)) + COALESCE(((crdb_internal.range_stats_with_errors(start_key)->'RangeStats')->>'range_val_bytes')::INT8, 0) AS range_size, concat(crdb_internal.lease_holder_with_errors(start_key)->>'Error', ' ', crdb_internal.range_stats_with_errors(start_key)->>'Error') AS errors FROM crdb_internal.ranges_no_leases"}}
+4294967236 {"table": {"columns": [{"id": 1, "name": "range_id", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 2, "name": "start_key", "nullable": true, "type": {"family": "BytesFamily", "oid": 17}}, {"id": 3, "name": "start_pretty", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "end_key", "nullable": true, "type": {"family": "BytesFamily", "oid": 17}}, {"id": 5, "name": "end_pretty", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "replicas", "nullable": true, "type": {"arrayContents": {"family": "IntFamily", "oid": 20, "width": 64}, "arrayElemType": "IntFamily", "family": "ArrayFamily", "oid": 1016, "width": 64}}, {"id": 7, "name": "replica_localities", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}, {"id": 8, "name": "voting_replicas", "nullable": true, "type": {"arrayContents": {"family": "IntFamily", "oid": 20, "width": 64}, "arrayElemType": "IntFamily", "family": "ArrayFamily", "oid": 1016, "width": 64}}, {"id": 9, "name": "non_voting_replicas", "nullable": true, "type": {"arrayContents": {"family": "IntFamily", "oid": 20, "width": 64}, "arrayElemType": "IntFamily", "family": "ArrayFamily", "oid": 1016, "width": 64}}, {"id": 10, "name": "learner_replicas", "nullable": true, "type": {"arrayContents": {"family": "IntFamily", "oid": 20, "width": 64}, "arrayElemType": "IntFamily", "family": "ArrayFamily", "oid": 1016, "width": 64}}, {"id": 11, "name": "split_enforced_until", "nullable": true, "type": {"family": "TimestampFamily", "oid": 1114}}, {"id": 12, "name": "lease_holder", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 13, "name": "range_size", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}], "formatVersion": 3, "id": 4294967236, "name": "ranges", "nextColumnId": 14, "nextConstraintId": 1, "nextMutationId": 1, "primaryIndex": {"foreignKey": {}, "geoConfig": {}, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967295, "version": "1", "viewQuery": "SELECT range_id, start_key, start_pretty, end_key, end_pretty, replicas, replica_localities, voting_replicas, non_voting_replicas, learner_replicas, split_enforced_until, crdb_internal.lease_holder(start_key) AS lease_holder, (((crdb_internal.range_stats(start_key)->>'key_bytes')::INT8 + (crdb_internal.range_stats(start_key)->>'val_bytes')::INT8) + COALESCE((crdb_internal.range_stats(start_key)->>'range_key_bytes')::INT8, 0)) + COALESCE((crdb_internal.range_stats(start_key)->>'range_val_bytes')::INT8, 0) AS range_size FROM crdb_internal.ranges_no_leases"}}
4294967237 {"table": {"columns": [{"id": 1, "name": "range_id", "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 2, "name": "start_key", "type": {"family": "BytesFamily", "oid": 17}}, {"id": 3, "name": "start_pretty", "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "end_key", "type": {"family": "BytesFamily", "oid": 17}}, {"id": 5, "name": "end_pretty", "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "replicas", "type": {"arrayContents": {"family": "IntFamily", "oid": 20, "width": 64}, "arrayElemType": "IntFamily", "family": "ArrayFamily", "oid": 1016, "width": 64}}, {"id": 7, "name": "replica_localities", "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}, {"id": 8, "name": "voting_replicas", "type": {"arrayContents": {"family": "IntFamily", "oid": 20, "width": 64}, "arrayElemType": "IntFamily", "family": "ArrayFamily", "oid": 1016, "width": 64}}, {"id": 9, "name": "non_voting_replicas", "type": {"arrayContents": {"family": "IntFamily", "oid": 20, "width": 64}, "arrayElemType": "IntFamily", "family": "ArrayFamily", "oid": 1016, "width": 64}}, {"id": 10, "name": "learner_replicas", "type": {"arrayContents": {"family": "IntFamily", "oid": 20, "width": 64}, "arrayElemType": "IntFamily", "family": "ArrayFamily", "oid": 1016, "width": 64}}, {"id": 11, "name": "split_enforced_until", "nullable": true, "type": {"family": "TimestampFamily", "oid": 1114}}], "formatVersion": 3, "id": 4294967237, "name": "ranges_no_leases", "nextColumnId": 12, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967295, "version": "1"}}
4294967238 {"table": {"columns": [{"id": 1, "name": "table_id", "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 2, "name": "index_id", "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 3, "name": "parent_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "columns", "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 6, "name": "column_names", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "list_value", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 8, "name": "range_value", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 9, "name": "zone_id", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 10, "name": "subzone_id", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}], "formatVersion": 3, "id": 4294967238, "name": "partitions", "nextColumnId": 11, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967295, "version": "1"}}
4294967239 {"table": {"columns": [{"id": 1, "name": "node_id", "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 2, "name": "application_name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "txn_count", "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 4, "name": "txn_time_avg_sec", "type": {"family": "FloatFamily", "oid": 701, "width": 64}}, {"id": 5, "name": "txn_time_var_sec", "type": {"family": "FloatFamily", "oid": 701, "width": 64}}, {"id": 6, "name": "committed_count", "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 7, "name": "implicit_count", "type": {"family": "IntFamily", "oid": 20, "width": 64}}], "formatVersion": 3, "id": 4294967239, "name": "node_txn_stats", "nextColumnId": 8, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967295, "version": "1"}}
diff --git a/pkg/sql/logictest/testdata/logic_test/create_table b/pkg/sql/logictest/testdata/logic_test/create_table
index e46d9b95e9d0..e24f90eb88bb 100644
--- a/pkg/sql/logictest/testdata/logic_test/create_table
+++ b/pkg/sql/logictest/testdata/logic_test/create_table
@@ -1097,3 +1097,16 @@ statement ok
SET inject_retry_errors_enabled=false
subtest end
+
+
+# Addresses a bug where parsing nextval expressions with extra values could
+# end up with a panic when rewriting sequence expressions.
+subtest 133399
+
+statement ok
+CREATE TABLE v_133399 (c01 INT);
+
+statement error pgcode 42804 could not find matching function overload for given arguments
+CREATE TABLE t_133399 AS (SELECT * FROM v_133399 WINDOW window_name AS (ROWS c01 BETWEEN nextval ('abc', 'abc', 'abc') AND c01 PRECEDING));
+
+subtest end
diff --git a/pkg/sql/logictest/testdata/logic_test/distsql_stats b/pkg/sql/logictest/testdata/logic_test/distsql_stats
index de8c652acac9..7c7f8e4f646f 100644
--- a/pkg/sql/logictest/testdata/logic_test/distsql_stats
+++ b/pkg/sql/logictest/testdata/logic_test/distsql_stats
@@ -2869,14 +2869,6 @@ upper_bound range_rows distinct_range_rows equal_rows
'{"bar": {"baz": 2}}' 0 0 1
'{"bar": {"baz": 3}}' 0 0 1
-# Regression test for collecting stats on a table only with virtual computed
-# columns (#130817).
-statement ok
-CREATE TABLE t130817 (k INT PRIMARY KEY AS (NULL) VIRTUAL);
-
-statement ok
-ANALYZE t130817;
-
# Test partial stats using extremes on indexed virtual computed columns.
statement ok
INSERT INTO t68254 (a, b, c) VALUES (5, '5', '{"foo": {"bar": {"baz": 5}}}')
diff --git a/pkg/sql/logictest/testdata/logic_test/grant_on_all_tables_in_schema b/pkg/sql/logictest/testdata/logic_test/grant_on_all_tables_in_schema
index d59f386ea42e..01fba7ee9cd5 100644
--- a/pkg/sql/logictest/testdata/logic_test/grant_on_all_tables_in_schema
+++ b/pkg/sql/logictest/testdata/logic_test/grant_on_all_tables_in_schema
@@ -132,3 +132,12 @@ database_name schema_name table_name grantee privilege_type is_grantable
otherdb public tbl admin ALL true
otherdb public tbl root ALL true
otherdb public tbl testuser SELECT false
+
+statement ok
+CREATE TABLE t131157 (c1 INT)
+
+statement ok
+GRANT ALL ON t131157 TO testuser
+
+statement error t131157 is not a sequence
+REVOKE CREATE ON SEQUENCE t131157 FROM testuser
diff --git a/pkg/sql/logictest/testdata/logic_test/information_schema b/pkg/sql/logictest/testdata/logic_test/information_schema
index b43cbc74aefb..c5c8bb9f91cf 100644
--- a/pkg/sql/logictest/testdata/logic_test/information_schema
+++ b/pkg/sql/logictest/testdata/logic_test/information_schema
@@ -6221,6 +6221,7 @@ application_name ·
authentication_method cert-password
avoid_buffering off
backslash_quote safe_encoding
+bypass_pcr_reader_catalog_aost off
bytea_output hex
check_function_bodies on
client_encoding UTF8
diff --git a/pkg/sql/logictest/testdata/logic_test/pg_catalog b/pkg/sql/logictest/testdata/logic_test/pg_catalog
index 36a158fc8905..7befe6b70c17 100644
--- a/pkg/sql/logictest/testdata/logic_test/pg_catalog
+++ b/pkg/sql/logictest/testdata/logic_test/pg_catalog
@@ -2869,6 +2869,7 @@ authentication_method cert-password N
autocommit_before_ddl off NULL NULL NULL string
avoid_buffering off NULL NULL NULL string
backslash_quote safe_encoding NULL NULL NULL string
+bypass_pcr_reader_catalog_aost off NULL NULL NULL string
bytea_output hex NULL NULL NULL string
check_function_bodies on NULL NULL NULL string
client_encoding UTF8 NULL NULL NULL string
@@ -3062,6 +3063,7 @@ authentication_method cert-password N
autocommit_before_ddl off NULL user NULL off off
avoid_buffering off NULL user NULL off off
backslash_quote safe_encoding NULL user NULL safe_encoding safe_encoding
+bypass_pcr_reader_catalog_aost off NULL user NULL off off
bytea_output hex NULL user NULL hex hex
check_function_bodies on NULL user NULL on on
client_encoding UTF8 NULL user NULL UTF8 UTF8
@@ -3248,6 +3250,7 @@ authentication_method NULL NULL NULL
autocommit_before_ddl NULL NULL NULL NULL NULL
avoid_buffering NULL NULL NULL NULL NULL
backslash_quote NULL NULL NULL NULL NULL
+bypass_pcr_reader_catalog_aost NULL NULL NULL NULL NULL
bytea_output NULL NULL NULL NULL NULL
check_function_bodies NULL NULL NULL NULL NULL
client_encoding NULL NULL NULL NULL NULL
diff --git a/pkg/sql/logictest/testdata/logic_test/show_source b/pkg/sql/logictest/testdata/logic_test/show_source
index 2e6e1791889a..22b2a45059ae 100644
--- a/pkg/sql/logictest/testdata/logic_test/show_source
+++ b/pkg/sql/logictest/testdata/logic_test/show_source
@@ -35,6 +35,7 @@ authentication_method cert-password
autocommit_before_ddl off
avoid_buffering off
backslash_quote safe_encoding
+bypass_pcr_reader_catalog_aost off
bytea_output hex
check_function_bodies on
client_encoding UTF8
diff --git a/pkg/sql/logictest/testdata/logic_test/system b/pkg/sql/logictest/testdata/logic_test/system
index 9018c907c3dd..7acb9eb6fc7c 100644
--- a/pkg/sql/logictest/testdata/logic_test/system
+++ b/pkg/sql/logictest/testdata/logic_test/system
@@ -1440,7 +1440,7 @@ SELECT name, value FROM system.settings WHERE name = 'sql.defaults.vectorize'
----
sql.defaults.vectorize 1
-query T
+query T retry
SHOW CLUSTER SETTING sql.defaults.vectorize
----
on
@@ -1468,7 +1468,7 @@ query TT
SELECT name, value FROM system.settings WHERE name = 'sql.defaults.vectorize'
----
-query T
+query T retry
SHOW CLUSTER SETTING sql.defaults.vectorize
----
on
diff --git a/pkg/sql/logictest/testdata/logic_test/truncate_with_concurrent_mutation b/pkg/sql/logictest/testdata/logic_test/truncate_with_concurrent_mutation
index 218070ff522e..055b19053f01 100644
--- a/pkg/sql/logictest/testdata/logic_test/truncate_with_concurrent_mutation
+++ b/pkg/sql/logictest/testdata/logic_test/truncate_with_concurrent_mutation
@@ -93,11 +93,6 @@ SET enable_experimental_alter_column_type_general = true;
statement error pq: job \d+ was paused before it completed with reason: pause point
ALTER TABLE t5 ALTER COLUMN b TYPE STRING;
-# The ALTER COLUMN TYPE operation is now managed by the declarative schema
-# changer. As a result, we no longer perform the computed column swap mutation,
-# which truncate was waiting on. Therefore, we only need to test this in the legacy
-# schema changer.
-onlyif config local-legacy-schema-changer
statement error pq: unimplemented: cannot perform TRUNCATE on "t5" which has an ongoing column type change
TRUNCATE TABLE t5;
diff --git a/pkg/sql/logictest/testdata/logic_test/upsert b/pkg/sql/logictest/testdata/logic_test/upsert
index 222e55012568..9ae2aedc897e 100644
--- a/pkg/sql/logictest/testdata/logic_test/upsert
+++ b/pkg/sql/logictest/testdata/logic_test/upsert
@@ -1357,3 +1357,73 @@ query III
SELECT * FROM arbiter_index
----
1 2 10
+
+subtest regression_133146
+
+# Regression test for #133146. Columns that are not updated in an UPSERT should
+# not cause not-null constraint violations.
+statement ok
+CREATE TABLE t133146 (
+ id INT PRIMARY KEY,
+ a INT NOT NULL,
+ b INT
+)
+
+statement ok
+INSERT INTO t133146 (id, a, b) VALUES (1, 2, 3)
+
+# This should not cause a not-null constraint violation of column "a" because
+# the value of "a" is not being updated to NULL in the existing row.
+statement ok
+UPSERT INTO t133146 (id, b) VALUES (1, 30)
+
+query III
+SELECT * FROM t133146
+----
+1 2 30
+
+statement ok
+INSERT INTO t133146 (id, b) VALUES (1, 40) ON CONFLICT (id) DO UPDATE SET b = 40
+
+query III
+SELECT * FROM t133146
+----
+1 2 40
+
+statement error pgcode 23502 pq: null value in column \"a\" violates not-null constraint
+UPSERT INTO t133146 (id, a) VALUES (1, NULL)
+
+statement error pgcode 23502 pq: null value in column \"a\" violates not-null constraint
+INSERT INTO t133146 (id, b) VALUES (1, 50) ON CONFLICT (id) DO UPDATE SET a = NULL
+
+statement ok
+CREATE TABLE t133146b (
+ a INT,
+ b INT NOT NULL,
+ id INT PRIMARY KEY
+)
+
+statement ok
+INSERT INTO t133146b (id, a, b) VALUES (1, 2, 3)
+
+statement ok
+UPSERT INTO t133146b (id, b) VALUES (1, 30)
+
+query III
+SELECT * FROM t133146b
+----
+2 30 1
+
+statement ok
+INSERT INTO t133146b (id, b) VALUES (1, 40) ON CONFLICT (id) DO UPDATE SET b = 40
+
+query III
+SELECT * FROM t133146b
+----
+2 40 1
+
+statement error pgcode 23502 pq: null value in column \"b\" violates not-null constraint
+UPSERT INTO t133146b (id, b) VALUES (1, NULL)
+
+statement error pgcode 23502 pq: null value in column \"b\" violates not-null constraint
+INSERT INTO t133146b (id, a) VALUES (1, 20) ON CONFLICT (id) DO UPDATE SET b = NULL
diff --git a/pkg/sql/logictest/testdata/logic_test/vectorize_types b/pkg/sql/logictest/testdata/logic_test/vectorize_types
index 28738801ddba..933517f07ac3 100644
--- a/pkg/sql/logictest/testdata/logic_test/vectorize_types
+++ b/pkg/sql/logictest/testdata/logic_test/vectorize_types
@@ -182,3 +182,47 @@ FROM
t107615
WHERE
(_bool OR (NOT _bool));
+
+# Regression test for corrupting the column type schema captured by a vectorized
+# operator when multiple stages of processors share the same type slice during
+# physical planning (#130402).
+statement ok
+CREATE TABLE abcdef (
+ a STRING,
+ b INT4,
+ c INT4,
+ d INT4,
+ e STRING,
+ f STRING
+);
+
+statement ok
+INSERT INTO abcdef (a, b, c, d, e, f) VALUES ('a', 0, 0, 0, 'e', 'f');
+
+statement ok
+CREATE TABLE ghijkl (
+ g INT4,
+ h INT4,
+ i BOOL,
+ j INT4,
+ k STRING,
+ l STRING
+);
+
+statement ok
+INSERT INTO ghijkl (g, h, i, j, k, l) VALUES (0, 0, true, -1, 'k', 'l');
+
+statement ok
+SELECT c7, c1 >= (SELECT c FROM abcdef WHERE e != c3), c5
+FROM (
+ SELECT
+ generate_series(g, d) AS c0,
+ h AS c1,
+ l AS c3,
+ k AS c5,
+ c AS c7
+ FROM ghijkl
+ LEFT JOIN abcdef ON b > j OR i NOT IN (e NOT IN (f, a),)
+ LIMIT 2
+)
+WHERE c0 NOT IN (SELECT NULL FROM ghijkl);
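
The regression comment above describes an aliasing bug: two stages of the physical plan shared one type slice, so a later stage's in-place edit corrupted the schema an earlier operator had captured. A generic Go sketch of the failure mode and the defensive copy that avoids it; names are illustrative only:

    package main

    import "fmt"

    type colType string

    // planStage is a stand-in for a processor stage that records its input
    // column types during physical planning.
    type planStage struct{ types []colType }

    func main() {
        shared := []colType{"string", "int4"}

        // Buggy pattern: both stages alias the same backing array, so a
        // later in-place edit rewrites the first stage's captured schema.
        s1 := planStage{types: shared}
        s2 := planStage{types: shared}
        s2.types[1] = "bool"
        fmt.Println(s1.types) // [string bool] — s1's schema was corrupted

        // Fix: hand each stage its own copy before any mutation.
        fresh := []colType{"string", "int4"}
        s3 := planStage{types: append([]colType(nil), fresh...)}
        fresh[1] = "bool"
        fmt.Println(s3.types) // [string int4]
    }
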
diff --git a/pkg/sql/logictest/testdata/logic_test/zone_config b/pkg/sql/logictest/testdata/logic_test/zone_config
index 1c93a2b21d64..3ed4ca478716 100644
--- a/pkg/sql/logictest/testdata/logic_test/zone_config
+++ b/pkg/sql/logictest/testdata/logic_test/zone_config
@@ -308,11 +308,13 @@ statement ok
SET CLUSTER SETTING sql.schema.force_declarative_statements = "+CONFIGURE ZONE"
skipif config local-mixed-24.1
+skipif config local-mixed-24.2
skipif config local-legacy-schema-changer
statement error pq: pg_type is a system catalog
ALTER TABLE pg_catalog.pg_type CONFIGURE ZONE USING gc.ttlseconds = 100000
skipif config local-mixed-24.1
+skipif config local-mixed-24.2
skipif config local-legacy-schema-changer
statement error pq: columns is a virtual object and cannot be modified
ALTER TABLE information_schema.columns CONFIGURE ZONE USING gc.ttlseconds = 100000
diff --git a/pkg/sql/multitenant_admin_function_test.go b/pkg/sql/multitenant_admin_function_test.go
index 4bb200374dcd..a7f10bddbe0a 100644
--- a/pkg/sql/multitenant_admin_function_test.go
+++ b/pkg/sql/multitenant_admin_function_test.go
@@ -363,7 +363,7 @@ func (te tenantExpected) isSet() bool {
}
func (te tenantExpected) validate(
- t *testing.T, runQuery func() (*gosql.Rows, error), message string,
+ t *testing.T, runQuery func() (_ *gosql.Rows, msg string, _ error),
) {
expectedErrorMessage := te.errorMessage
expectedResults := te.result
@@ -372,7 +372,7 @@ func (te tenantExpected) validate(
// query to make the test less flaky.
// See: https://github.com/cockroachdb/cockroach/issues/95252
testutils.SucceedsSoon(t, func() error {
- rows, err := runQuery()
+ rows, message, err := runQuery()
if expectedErrorMessage == "" {
if err != nil {
return errors.WithMessagef(err, "msg=%s", message)
@@ -408,7 +408,7 @@ func (te tenantExpected) validate(
}
default: // For deterministic results that should be non-empty.
if !strings.Contains(actualColResult, expectedColResult) {
- return errors.Newf("expected %q contains %q %s row=%d col=%d", actualColResult, expectedColResult, message, i, j)
+ return errors.Newf("expected %q to be contained in result %q %s row=%d col=%d", expectedColResult, actualColResult, message, i, j)
}
}
}
@@ -628,10 +628,10 @@ func TestMultiTenantAdminFunction(t *testing.T) {
}
tExp.validate(
t,
- func() (*gosql.Rows, error) {
- return db.QueryContext(ctx, tc.query)
+ func() (*gosql.Rows, string, error) {
+ rows, err := db.QueryContext(ctx, tc.query)
+ return rows, message, err
},
- message,
)
},
)
@@ -674,7 +674,7 @@ func TestTruncateTable(t *testing.T) {
tExp.validate(
t,
- func() (*gosql.Rows, error) {
+ func() (*gosql.Rows, string, error) {
// validateErr and validateRows come from separate queries for TRUNCATE.
_, validateErr := db.ExecContext(ctx, "TRUNCATE TABLE t;")
var validateRows *gosql.Rows
@@ -682,9 +682,8 @@ func TestTruncateTable(t *testing.T) {
validateRows, err = db.QueryContext(ctx, "SELECT start_key, end_key from [SHOW RANGES FROM INDEX t@primary];")
require.NoErrorf(t, err, message)
}
- return validateRows, validateErr
+ return validateRows, message, validateErr
},
- message,
)
},
)
@@ -752,31 +751,31 @@ func TestRelocateVoters(t *testing.T) {
testCluster.ToggleLeaseQueues(false)
testCluster.ToggleReplicateQueues(false)
testCluster.ToggleSplitQueues(false)
- replicaState := getReplicaState(
- t,
- ctx,
- db,
- expectedNumReplicas,
- expectedNumVotingReplicas,
- expectedNumNonVotingReplicas,
- message,
- )
- replicas := replicaState.replicas
- // Set toReplica to the node that does not have a voting replica for t.
- toReplica := getToReplica(testCluster.NodeIDs(), replicas)
- // Set fromReplica to the first non-leaseholder voting replica for t.
- fromReplica := replicas[0]
- if fromReplica == replicaState.leaseholder {
- fromReplica = replicas[1]
- }
- query := fmt.Sprintf(tc.query, fromReplica, toReplica)
- message = getReplicaStateMessage(tenant, query, replicaState, fromReplica, toReplica)
tExp.validate(
t,
- func() (*gosql.Rows, error) {
- return db.QueryContext(ctx, query)
+ func() (_ *gosql.Rows, msg string, _ error) {
+ replicaState := getReplicaState(
+ t,
+ ctx,
+ db,
+ expectedNumReplicas,
+ expectedNumVotingReplicas,
+ expectedNumNonVotingReplicas,
+ message,
+ )
+ replicas := replicaState.replicas
+ // Set toReplica to the node that does not have a voting replica for t.
+ toReplica := getToReplica(testCluster.NodeIDs(), replicas)
+ // Set fromReplica to the first non-leaseholder voting replica for t.
+ fromReplica := replicas[0]
+ if fromReplica == replicaState.leaseholder {
+ fromReplica = replicas[1]
+ }
+ query := fmt.Sprintf(tc.query, fromReplica, toReplica)
+ message = getReplicaStateMessage(tenant, query, replicaState, fromReplica, toReplica)
+ rows, err := db.QueryContext(ctx, query)
+ return rows, message, err
},
- message,
)
},
)
@@ -833,28 +832,28 @@ func TestExperimentalRelocateVoters(t *testing.T) {
testCluster.ToggleLeaseQueues(false)
testCluster.ToggleReplicateQueues(false)
testCluster.ToggleSplitQueues(false)
- replicaState := getReplicaState(
- t,
- ctx,
- db,
- expectedNumReplicas,
- expectedNumVotingReplicas,
- expectedNumNonVotingReplicas,
- message,
- )
- votingReplicas := replicaState.votingReplicas
- newVotingReplicas := make([]roachpb.NodeID, len(votingReplicas))
- newVotingReplicas[0] = votingReplicas[0]
- newVotingReplicas[1] = votingReplicas[1]
- newVotingReplicas[2] = getToReplica(testCluster.NodeIDs(), votingReplicas)
- query := fmt.Sprintf(tc.query, nodeIDsToArrayString(newVotingReplicas))
- message = getReplicaStateMessage(tenant, query, replicaState, votingReplicas[2], newVotingReplicas[2])
tExp.validate(
t,
- func() (*gosql.Rows, error) {
- return db.QueryContext(ctx, query)
+ func() (*gosql.Rows, string, error) {
+ replicaState := getReplicaState(
+ t,
+ ctx,
+ db,
+ expectedNumReplicas,
+ expectedNumVotingReplicas,
+ expectedNumNonVotingReplicas,
+ message,
+ )
+ votingReplicas := replicaState.votingReplicas
+ newVotingReplicas := make([]roachpb.NodeID, len(votingReplicas))
+ newVotingReplicas[0] = votingReplicas[0]
+ newVotingReplicas[1] = votingReplicas[1]
+ newVotingReplicas[2] = getToReplica(testCluster.NodeIDs(), votingReplicas)
+ query := fmt.Sprintf(tc.query, nodeIDsToArrayString(newVotingReplicas))
+ message = getReplicaStateMessage(tenant, query, replicaState, votingReplicas[2], newVotingReplicas[2])
+ rows, err := db.QueryContext(ctx, query)
+ return rows, message, err
},
- message,
)
},
)
@@ -927,27 +926,27 @@ func TestRelocateNonVoters(t *testing.T) {
testCluster.ToggleLeaseQueues(false)
testCluster.ToggleReplicateQueues(false)
testCluster.ToggleSplitQueues(false)
- replicaState := getReplicaState(
- t,
- ctx,
- db,
- expectedNumReplicas,
- expectedNumVotingReplicas,
- expectedNumNonVotingReplicas,
- message,
- )
- // Set toReplica to the node that does not have a voting replica for t.
- toReplica := getToReplica(testCluster.NodeIDs(), replicaState.replicas)
- // Set fromReplica to the first non-leaseholder voting replica for t.
- fromReplica := replicaState.nonVotingReplicas[0]
- query := fmt.Sprintf(tc.query, fromReplica, toReplica)
- message = getReplicaStateMessage(tenant, query, replicaState, fromReplica, toReplica)
tExp.validate(
t,
- func() (*gosql.Rows, error) {
- return db.QueryContext(ctx, query)
+ func() (*gosql.Rows, string, error) {
+ replicaState := getReplicaState(
+ t,
+ ctx,
+ db,
+ expectedNumReplicas,
+ expectedNumVotingReplicas,
+ expectedNumNonVotingReplicas,
+ message,
+ )
+ // Set toReplica to the node that does not have a replica for t.
+ toReplica := getToReplica(testCluster.NodeIDs(), replicaState.replicas)
+ // Set fromReplica to the first non-voting replica for t.
+ fromReplica := replicaState.nonVotingReplicas[0]
+ query := fmt.Sprintf(tc.query, fromReplica, toReplica)
+ message = getReplicaStateMessage(tenant, query, replicaState, fromReplica, toReplica)
+ rows, err := db.QueryContext(ctx, query)
+ return rows, message, err
},
- message,
)
},
)
@@ -1002,24 +1001,24 @@ func TestExperimentalRelocateNonVoters(t *testing.T) {
testCluster.ToggleLeaseQueues(false)
testCluster.ToggleReplicateQueues(false)
testCluster.ToggleSplitQueues(false)
- replicaState := getReplicaState(
- t,
- ctx,
- db,
- expectedNumReplicas,
- expectedNumVotingReplicas,
- expectedNumNonVotingReplicas,
- message,
- )
- newNonVotingReplicas := []roachpb.NodeID{getToReplica(testCluster.NodeIDs(), replicaState.replicas)}
- query := fmt.Sprintf(tc.query, nodeIDsToArrayString(newNonVotingReplicas))
- message = getReplicaStateMessage(tenant, query, replicaState, replicaState.nonVotingReplicas[0], newNonVotingReplicas[0])
tExp.validate(
t,
- func() (*gosql.Rows, error) {
- return db.QueryContext(ctx, query)
+ func() (*gosql.Rows, string, error) {
+ replicaState := getReplicaState(
+ t,
+ ctx,
+ db,
+ expectedNumReplicas,
+ expectedNumVotingReplicas,
+ expectedNumNonVotingReplicas,
+ message,
+ )
+ newNonVotingReplicas := []roachpb.NodeID{getToReplica(testCluster.NodeIDs(), replicaState.replicas)}
+ query := fmt.Sprintf(tc.query, nodeIDsToArrayString(newNonVotingReplicas))
+ message = getReplicaStateMessage(tenant, query, replicaState, replicaState.nonVotingReplicas[0], newNonVotingReplicas[0])
+ rows, err := db.QueryContext(ctx, query)
+ return rows, message, err
},
- message,
)
},
)
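// The four hunks above share one refactor: the replica-state lookup, the
// query construction, and the failure message all move inside the validate
// callback, and the callback's signature gains a string so the message is
// rebuilt on every attempt instead of being captured once, stale, at call
// time. A minimal sketch of a retrying validator with the new closure shape
// (validateWithRetry and attempts are illustrative; the real tExp.validate
// is not shown in this diff):
func validateWithRetry(
	t *testing.T, attempts int, fn func() (*gosql.Rows, string, error),
) {
	var lastMsg string
	var lastErr error
	for i := 0; i < attempts; i++ {
		rows, msg, err := fn() // state and message recomputed per attempt
		lastMsg, lastErr = msg, err
		if err == nil {
			rows.Close()
			return
		}
	}
	t.Fatalf("validation never succeeded: %v (%s)", lastErr, lastMsg)
}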
diff --git a/pkg/sql/mutation_test.go b/pkg/sql/mutation_test.go
index 8ceb170ca9a1..5f4c89b988fd 100644
--- a/pkg/sql/mutation_test.go
+++ b/pkg/sql/mutation_test.go
@@ -9,7 +9,6 @@ import (
"context"
gosql "database/sql"
"fmt"
- "reflect"
"sync"
"testing"
@@ -264,9 +263,8 @@ PARTITION ALL BY LIST (r) (
if err := rows.Scan(&id, &k, &r, &a); err != nil {
t.Fatal(err)
}
- res := []string{id, k, r, a}
- if !reflect.DeepEqual(tc.expectedOutput, res) {
- t.Fatalf("%d: expected %v, got %v", idx, tc.expectedOutput, res)
+ if id != tc.expectedOutput[0] || k != tc.expectedOutput[1] || r != tc.expectedOutput[2] || a != tc.expectedOutput[3] {
+ t.Fatalf("%d: expected %v, got %v", idx, tc.expectedOutput, []string{id, k, r, a})
}
}
rows.Close()
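// Dropping reflect.DeepEqual in favor of direct field comparisons removes
// the reflect import and the per-row slice allocation; for a fixed-width
// tuple of strings the two are equivalent. A self-contained illustration
// (hypothetical values):
func equalRow(id, k, r, a string, want [4]string) bool {
	return id == want[0] && k == want[1] && r == want[2] && a == want[3]
}
// equalRow("1", "x", "east", "a", [4]string{"1", "x", "east", "a"}) == true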
diff --git a/pkg/sql/opt/cat/BUILD.bazel b/pkg/sql/opt/cat/BUILD.bazel
index 94b1abe17f73..53868ab1316c 100644
--- a/pkg/sql/opt/cat/BUILD.bazel
+++ b/pkg/sql/opt/cat/BUILD.bazel
@@ -33,7 +33,6 @@ go_library(
"//pkg/sql/sem/tree",
"//pkg/sql/sessiondata",
"//pkg/sql/types",
- "//pkg/util/encoding",
"//pkg/util/treeprinter",
"@com_github_cockroachdb_errors//:errors",
"@com_github_cockroachdb_redact//:redact",
diff --git a/pkg/sql/opt/cat/utils.go b/pkg/sql/opt/cat/utils.go
index 3ee5594fa047..8cb6694ebc95 100644
--- a/pkg/sql/opt/cat/utils.go
+++ b/pkg/sql/opt/cat/utils.go
@@ -12,7 +12,6 @@ import (
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
- "github.com/cockroachdb/cockroach/pkg/util/encoding"
"github.com/cockroachdb/cockroach/pkg/util/treeprinter"
"github.com/cockroachdb/errors"
"github.com/cockroachdb/redact"
@@ -325,7 +324,7 @@ func formatFamily(family Family, buf *bytes.Buffer) {
// markRedactable is true.
func MaybeMarkRedactable(unsafe string, markRedactable bool) string {
if markRedactable {
- return string(redact.Sprintf("%s", encoding.Unsafe(unsafe)))
+ return string(redact.Sprintf("%s", redact.Unsafe(unsafe)))
}
return unsafe
}
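// redact.Sprintf already treats plain strings as unsafe, and redact.Unsafe
// marks that explicitly, so the encoding.Unsafe wrapper (and with it the
// pkg/util/encoding dependency) was redundant here. A minimal illustration,
// assuming the usual cockroachdb/redact marker behavior:
func redactExample() (raw, redacted string) {
	s := redact.Sprintf("family %s", redact.Unsafe("f1"))
	// raw keeps the value inside redaction markers; Redact() strips it.
	return string(s), string(s.Redact()) // "family ‹f1›", "family ‹×›"
}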
diff --git a/pkg/sql/opt/exec/execbuilder/BUILD.bazel b/pkg/sql/opt/exec/execbuilder/BUILD.bazel
index b13e0c745a99..e9b21f2614eb 100644
--- a/pkg/sql/opt/exec/execbuilder/BUILD.bazel
+++ b/pkg/sql/opt/exec/execbuilder/BUILD.bazel
@@ -47,6 +47,7 @@ go_library(
"//pkg/sql/sqlerrors",
"//pkg/sql/sqltelemetry",
"//pkg/sql/types",
+ "//pkg/util",
"//pkg/util/buildutil",
"//pkg/util/encoding",
"//pkg/util/errorutil",
diff --git a/pkg/sql/opt/exec/execbuilder/builder.go b/pkg/sql/opt/exec/execbuilder/builder.go
index 70fb81ef595e..62d61eacda63 100644
--- a/pkg/sql/opt/exec/execbuilder/builder.go
+++ b/pkg/sql/opt/exec/execbuilder/builder.go
@@ -7,8 +7,6 @@ package execbuilder
import (
"context"
- "slices"
- "strconv"
"time"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
@@ -174,41 +172,7 @@ type Builder struct {
IsANSIDML bool
// IndexesUsed lists the indexes used in the query with the format tableID@indexID.
- IndexesUsed
-}
-
-// IndexesUsed is a list of indexes used in a query.
-type IndexesUsed struct {
- indexes []struct {
- tableID cat.StableID
- indexID cat.StableID
- }
-}
-
-// add adds the given index to the list, if it is not already present.
-func (iu *IndexesUsed) add(tableID, indexID cat.StableID) {
- s := struct {
- tableID cat.StableID
- indexID cat.StableID
- }{tableID, indexID}
- if !slices.Contains(iu.indexes, s) {
- iu.indexes = append(iu.indexes, s)
- }
-}
-
-// Strings returns a slice of strings with the format tableID@indexID for each
-// index in the list.
-//
-// TODO(mgartner): Use a slice of struct{uint64, uint64} instead of converting
-// to strings.
-func (iu *IndexesUsed) Strings() []string {
- res := make([]string, len(iu.indexes))
- const base = 10
- for i, u := range iu.indexes {
- res[i] = strconv.FormatUint(uint64(u.tableID), base) + "@" +
- strconv.FormatUint(uint64(u.indexID), base)
- }
- return res
+ IndexesUsed []string
}
// New constructs an instance of the execution node builder using the
diff --git a/pkg/sql/opt/exec/execbuilder/relational.go b/pkg/sql/opt/exec/execbuilder/relational.go
index 9c7863836971..fe3da5fca334 100644
--- a/pkg/sql/opt/exec/execbuilder/relational.go
+++ b/pkg/sql/opt/exec/execbuilder/relational.go
@@ -38,6 +38,7 @@ import (
"github.com/cockroachdb/cockroach/pkg/sql/sqlerrors"
"github.com/cockroachdb/cockroach/pkg/sql/sqltelemetry"
"github.com/cockroachdb/cockroach/pkg/sql/types"
+ "github.com/cockroachdb/cockroach/pkg/util"
"github.com/cockroachdb/cockroach/pkg/util/buildutil"
"github.com/cockroachdb/cockroach/pkg/util/encoding"
"github.com/cockroachdb/cockroach/pkg/util/errorutil"
@@ -755,7 +756,7 @@ func (b *Builder) buildScan(scan *memo.ScanExpr) (_ execPlan, outputCols colOrdM
return execPlan{}, colOrdMap{},
errors.AssertionFailedf("expected inverted index scan to have a constraint")
}
- b.IndexesUsed.add(tab.ID(), idx.ID())
+ b.IndexesUsed = util.CombineUnique(b.IndexesUsed, []string{fmt.Sprintf("%d@%d", tab.ID(), idx.ID())})
// Save if we planned a full (large) table/index scan on the builder so that
// the planner can be made aware later. We only do this for non-virtual
@@ -2296,7 +2297,7 @@ func (b *Builder) buildIndexJoin(
// TODO(radu): the distsql implementation of index join assumes that the input
// starts with the PK columns in order (#40749).
pri := tab.Index(cat.PrimaryIndex)
- b.IndexesUsed.add(tab.ID(), pri.ID())
+ b.IndexesUsed = util.CombineUnique(b.IndexesUsed, []string{fmt.Sprintf("%d@%d", tab.ID(), pri.ID())})
keyCols := make([]exec.NodeColumnOrdinal, pri.KeyColumnCount())
for i := range keyCols {
keyCols[i], err = getNodeColumnOrdinal(inputCols, join.Table.ColumnID(pri.Column(i).Ordinal()))
@@ -2674,7 +2675,7 @@ func (b *Builder) buildLookupJoin(
tab := md.Table(join.Table)
idx := tab.Index(join.Index)
- b.IndexesUsed.add(tab.ID(), idx.ID())
+ b.IndexesUsed = util.CombineUnique(b.IndexesUsed, []string{fmt.Sprintf("%d@%d", tab.ID(), idx.ID())})
locking, err := b.buildLocking(join.Table, join.Locking)
if err != nil {
@@ -2854,7 +2855,7 @@ func (b *Builder) buildInvertedJoin(
md := b.mem.Metadata()
tab := md.Table(join.Table)
idx := tab.Index(join.Index)
- b.IndexesUsed.add(tab.ID(), idx.ID())
+ b.IndexesUsed = util.CombineUnique(b.IndexesUsed, []string{fmt.Sprintf("%d@%d", tab.ID(), idx.ID())})
prefixEqCols := make([]exec.NodeColumnOrdinal, len(join.PrefixKeyCols))
for i, c := range join.PrefixKeyCols {
@@ -2996,8 +2997,10 @@ func (b *Builder) buildZigzagJoin(
rightTable := md.Table(join.RightTable)
leftIndex := leftTable.Index(join.LeftIndex)
rightIndex := rightTable.Index(join.RightIndex)
- b.IndexesUsed.add(leftTable.ID(), leftIndex.ID())
- b.IndexesUsed.add(rightTable.ID(), rightIndex.ID())
+ b.IndexesUsed = util.CombineUnique(b.IndexesUsed,
+ []string{fmt.Sprintf("%d@%d", leftTable.ID(), leftIndex.ID())})
+ b.IndexesUsed = util.CombineUnique(b.IndexesUsed,
+ []string{fmt.Sprintf("%d@%d", rightTable.ID(), rightIndex.ID())})
leftEqCols := make([]exec.TableColumnOrdinal, len(join.LeftEqCols))
rightEqCols := make([]exec.TableColumnOrdinal, len(join.RightEqCols))
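// Across builder.go and relational.go, IndexesUsed collapses from a struct
// of (tableID, indexID) pairs to a plain []string deduplicated at insertion
// time via util.CombineUnique. Exact semantics of the real helper aside, a
// minimal stand-in that preserves the observable guarantee (each
// tableID@indexID entry appears once) could look like:
func combineUnique(a, b []string) []string {
	for _, s := range b {
		if !slices.Contains(a, s) { // "slices" from the Go standard library
			a = append(a, s)
		}
	}
	return a
}
// indexesUsed = combineUnique(indexesUsed, []string{fmt.Sprintf("%d@%d", tabID, idxID)})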
diff --git a/pkg/sql/opt/exec/execbuilder/testdata/explain_gist b/pkg/sql/opt/exec/execbuilder/testdata/explain_gist
index 6ab0e52fd045..cc6d234ce87a 100644
--- a/pkg/sql/opt/exec/execbuilder/testdata/explain_gist
+++ b/pkg/sql/opt/exec/execbuilder/testdata/explain_gist
@@ -203,3 +203,9 @@ SELECT crdb_internal.decode_external_plan_gist('AixE')
----
• split
index: ?@?
+
+# Regression test for #133015. Gracefully handle decoding negative integers.
+query T
+SELECT crdb_internal.decode_external_plan_gist('Aifvzn5p':::STRING)
+----
+• create view
diff --git a/pkg/sql/opt/exec/explain/plan_gist_factory.go b/pkg/sql/opt/exec/explain/plan_gist_factory.go
index 4061743bd520..4a2c200a1824 100644
--- a/pkg/sql/opt/exec/explain/plan_gist_factory.go
+++ b/pkg/sql/opt/exec/explain/plan_gist_factory.go
@@ -332,6 +332,9 @@ func (f *PlanGistFactory) encodeNodeColumnOrdinals(vals []exec.NodeColumnOrdinal
func (f *PlanGistFactory) decodeNodeColumnOrdinals() []exec.NodeColumnOrdinal {
l := f.decodeInt()
+ if l < 0 {
+ return nil
+ }
vals := make([]exec.NodeColumnOrdinal, l)
return vals
}
@@ -342,6 +345,9 @@ func (f *PlanGistFactory) encodeResultColumns(vals colinfo.ResultColumns) {
func (f *PlanGistFactory) decodeResultColumns() colinfo.ResultColumns {
numCols := f.decodeInt()
+ if numCols < 0 {
+ return nil
+ }
return make(colinfo.ResultColumns, numCols)
}
@@ -464,6 +470,9 @@ func (f *PlanGistFactory) encodeRows(rows [][]tree.TypedExpr) {
func (f *PlanGistFactory) decodeRows() [][]tree.TypedExpr {
numRows := f.decodeInt()
+ if numRows < 0 {
+ return nil
+ }
return make([][]tree.TypedExpr, numRows)
}
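// All three decoders read a length from a user-supplied gist string, so a
// corrupt or adversarial payload can decode to a negative integer, and
// make() panics on negative lengths ("makeslice: len out of range").
// Guarding first turns that panic into a graceful nil result:
func safeMake(n int) []int {
	if n < 0 {
		return nil // malformed gist: prefer an empty decode over a panic
	}
	return make([]int, n)
}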
diff --git a/pkg/sql/opt/xform/testdata/rules/select b/pkg/sql/opt/xform/testdata/rules/select
index 968106f49268..4e5e57bee44c 100644
--- a/pkg/sql/opt/xform/testdata/rules/select
+++ b/pkg/sql/opt/xform/testdata/rules/select
@@ -2820,7 +2820,7 @@ CREATE TABLE t132669 (
# unnecessary recursion to trigger a stack overflow without having to make the
# `IN` list below huge - triggering a stack overflow with Go's default max stack
# size requires a list of ~1.6 million elements.
-opt max-stack=50KB format=hide-all
+opt max-stack=125KB format=hide-all
SELECT * FROM t132669
WHERE a IN (
1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
diff --git a/pkg/sql/pgwire/pre_serve.go b/pkg/sql/pgwire/pre_serve.go
index 77cc43ae92d2..db0cf0a73306 100644
--- a/pkg/sql/pgwire/pre_serve.go
+++ b/pkg/sql/pgwire/pre_serve.go
@@ -441,6 +441,9 @@ func (s *PreServeConnHandler) maybeUpgradeToSecureConn(
// TODO(knz): Remove this condition - see
// https://github.com/cockroachdb/cockroach/issues/53404
if s.cfg.Insecure {
+ if buildutil.CrdbTestBuild {
+ log.Infof(ctx, "using insecure mode since version=%d and cfg.Insecure=true", version)
+ }
return
}
@@ -453,6 +456,9 @@ func (s *PreServeConnHandler) maybeUpgradeToSecureConn(
log.Warningf(ctx, "client cannot connect since version=%d AcceptSQLWithoutTLS=false and connType=%s", version, connType)
}
}
+ if buildutil.CrdbTestBuild {
+ log.Infof(ctx, "client did not request SSL version=%d AcceptSQLWithoutTLS=false and connType=%s", version, connType)
+ }
return
}
@@ -473,6 +479,9 @@ func (s *PreServeConnHandler) maybeUpgradeToSecureConn(
// Protocol sanity check.
if len(buf.Msg) > 0 {
serverErr = errors.Errorf("unexpected data after SSLRequest: %q", buf.Msg)
+ if buildutil.CrdbTestBuild {
+ log.Warningf(ctx, "protocol error err=%v", serverErr)
+ }
return
}
@@ -482,6 +491,9 @@ func (s *PreServeConnHandler) maybeUpgradeToSecureConn(
// Do we have a TLS configuration?
tlsConfig, serverErr := s.getTLSConfig()
if serverErr != nil {
+ if buildutil.CrdbTestBuild {
+ log.Warningf(ctx, "could not get TLS config err=%v", serverErr)
+ }
return
}
@@ -494,12 +506,21 @@ func (s *PreServeConnHandler) maybeUpgradeToSecureConn(
}
n, serverErr = conn.Write(sslUnsupported)
if serverErr != nil {
+ if buildutil.CrdbTestBuild {
+ log.Warningf(ctx, "error while sending sslUnsupported message to client err=%v", serverErr)
+ }
return
}
} else {
+ if buildutil.CrdbTestBuild {
+ log.Infof(ctx, "sending sslSupported message to client")
+ }
// We have a TLS configuration. Upgrade the connection.
n, serverErr = conn.Write(sslSupported)
if serverErr != nil {
+ if buildutil.CrdbTestBuild {
+ log.Warningf(ctx, "error while sending sslSupported message to client err=%v", serverErr)
+ }
return
}
newConn = tls.Server(conn, tlsConfig)
@@ -509,6 +530,9 @@ func (s *PreServeConnHandler) maybeUpgradeToSecureConn(
// Finally, re-read the version/command from the client.
newVersion, *buf, serverErr = s.readVersion(newConn)
+ if buildutil.CrdbTestBuild && serverErr != nil {
+ log.Warningf(ctx, "error when reading version err=%v", serverErr)
+ }
return
}
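// Every new log line in pre_serve.go is gated on buildutil.CrdbTestBuild, a
// compile-time constant, so the extra handshake logging exists only in test
// builds and is eliminated as dead code from production binaries. The
// general shape, with testBuild standing in for the real constant:
const testBuild = true // stand-in for buildutil.CrdbTestBuild
func debugLogf(ctx context.Context, format string, args ...interface{}) {
	if testBuild { // branch folds away when the constant is false
		log.Infof(ctx, format, args...)
	}
}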
diff --git a/pkg/sql/resolver.go b/pkg/sql/resolver.go
index de406e70529c..1af0e88da3b3 100644
--- a/pkg/sql/resolver.go
+++ b/pkg/sql/resolver.go
@@ -538,6 +538,8 @@ func (p *planner) getDescriptorsFromTargetListForPrivilegeChange(
objectType: privilege.Sequence,
},
)
+ } else if targets.Tables.SequenceOnly {
+ return nil, pgerror.Newf(pgcode.WrongObjectType, "%s is not a sequence", tableDesc.GetName())
} else {
descs = append(
descs,
diff --git a/pkg/sql/run_control_test.go b/pkg/sql/run_control_test.go
index a2cae8aa4929..5388460f5b8c 100644
--- a/pkg/sql/run_control_test.go
+++ b/pkg/sql/run_control_test.go
@@ -1016,6 +1016,10 @@ func TestStatementTimeoutForSchemaChangeCommit(t *testing.T) {
require.NoError(t, err)
// Test implicit transactions first.
blockSchemaChange.Swap(true)
+ defer func() {
+ close(waitForTimeout)
+ blockSchemaChange.Swap(false)
+ }()
if implicitTxn {
_, err := conn.DB.ExecContext(ctx, "ALTER TABLE t1 ADD COLUMN j INT DEFAULT 32")
require.ErrorContains(t, err, sqlerrors.QueryTimeoutError.Error())
@@ -1030,8 +1034,6 @@ func TestStatementTimeoutForSchemaChangeCommit(t *testing.T) {
err = txn.Commit()
require.NoError(t, err)
}
- close(waitForTimeout)
- blockSchemaChange.Swap(false)
})
}
}
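// Moving the cleanup into a defer guarantees it runs even when an assertion
// fails: require.ErrorContains calls t.Fatal on mismatch, which exits the
// test via runtime.Goexit, skipping any trailing statements but still
// running deferred functions. Sketch of the pattern (blockSchemaChange and
// waitForTimeout as in the test above):
func runWithCleanup(t *testing.T) {
	blockSchemaChange.Swap(true)
	defer func() {
		close(waitForTimeout)         // always unblock the waiting goroutine
		blockSchemaChange.Swap(false) // restore state, even on t.Fatal
	}()
	// ... assertions that may call t.Fatal ...
}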
diff --git a/pkg/sql/schemachanger/BUILD.bazel b/pkg/sql/schemachanger/BUILD.bazel
index 111eb6a546d3..10950abfc197 100644
--- a/pkg/sql/schemachanger/BUILD.bazel
+++ b/pkg/sql/schemachanger/BUILD.bazel
@@ -57,6 +57,7 @@ go_test(
"//pkg/sql/schemachanger/scop",
"//pkg/sql/schemachanger/scplan",
"//pkg/sql/schemachanger/sctest", # keep
+ "//pkg/sql/sem/eval",
"//pkg/sql/sessiondatapb",
"//pkg/testutils",
"//pkg/testutils/serverutils",
diff --git a/pkg/sql/schemachanger/dml_injection_test.go b/pkg/sql/schemachanger/dml_injection_test.go
index 88b03d5eb997..0020b1e02d4e 100644
--- a/pkg/sql/schemachanger/dml_injection_test.go
+++ b/pkg/sql/schemachanger/dml_injection_test.go
@@ -17,6 +17,7 @@ import (
"github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scexec"
"github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scop"
"github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan"
+ "github.com/cockroachdb/cockroach/pkg/sql/sem/eval"
"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
"github.com/cockroachdb/cockroach/pkg/testutils/skip"
"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
@@ -457,6 +458,11 @@ func TestAlterTableDMLInjection(t *testing.T) {
testCluster := serverutils.StartCluster(t, 1, base.TestClusterArgs{
ServerArgs: base.TestServerArgs{
Knobs: base.TestingKnobs{
+ SQLEvalContext: &eval.TestingKnobs{
+ // We disable the randomization of some batch sizes because with
+ // some low values the test takes much longer.
+ ForceProductionValues: true,
+ },
SQLDeclarativeSchemaChanger: &scexec.TestingKnobs{
BeforeStage: func(p scplan.Plan, stageIdx int) error {
if !clusterCreated.Load() {
diff --git a/pkg/sql/schemachanger/scbuild/builder_test.go b/pkg/sql/schemachanger/scbuild/builder_test.go
index 887467648510..c3a3601c5b4a 100644
--- a/pkg/sql/schemachanger/scbuild/builder_test.go
+++ b/pkg/sql/schemachanger/scbuild/builder_test.go
@@ -51,8 +51,6 @@ func TestBuildDataDriven(t *testing.T) {
ctx := context.Background()
- skip.UnderRace(t, "expensive and can easily extend past test timeout")
-
datadriven.Walk(t, datapathutils.TestDataPath(t), func(t *testing.T, path string) {
for _, depsType := range []struct {
name string
@@ -97,7 +95,6 @@ func TestBuildDataDriven(t *testing.T) {
sd.NewSchemaChangerMode = sessiondatapb.UseNewSchemaChangerUnsafe
sd.ApplicationName = ""
sd.EnableUniqueWithoutIndexConstraints = true
- sd.AlterColumnTypeGeneralEnabled = true
},
),
),
diff --git a/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/alter_table_add_column.go b/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/alter_table_add_column.go
index c3b7f449c19f..91e1cd8dc878 100644
--- a/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/alter_table_add_column.go
+++ b/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/alter_table_add_column.go
@@ -311,18 +311,17 @@ func columnNamesToIDs(b BuildCtx, tbl *scpb.Table) map[string]descpb.ColumnID {
}
type addColumnSpec struct {
- tbl *scpb.Table
- col *scpb.Column
- fam *scpb.ColumnFamily
- name *scpb.ColumnName
- colType *scpb.ColumnType
- def *scpb.ColumnDefaultExpression
- onUpdate *scpb.ColumnOnUpdateExpression
- compute *scpb.ColumnComputeExpression
- comment *scpb.ColumnComment
- unique bool
- notNull bool
- transientCompute bool
+ tbl *scpb.Table
+ col *scpb.Column
+ fam *scpb.ColumnFamily
+ name *scpb.ColumnName
+ colType *scpb.ColumnType
+ def *scpb.ColumnDefaultExpression
+ onUpdate *scpb.ColumnOnUpdateExpression
+ compute *scpb.ColumnComputeExpression
+ comment *scpb.ColumnComment
+ unique bool
+ notNull bool
}
// addColumn adds a column as specified in the `spec`. It delegates most of the work
@@ -349,11 +348,7 @@ func addColumn(b BuildCtx, spec addColumnSpec, n tree.NodeFormatter) (backing *s
b.Add(spec.onUpdate)
}
if spec.compute != nil {
- if spec.transientCompute {
- b.AddTransient(spec.compute)
- } else {
- b.Add(spec.compute)
- }
+ b.Add(spec.compute)
}
if spec.comment != nil {
b.Add(spec.comment)
diff --git a/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/alter_table_alter_column_type.go b/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/alter_table_alter_column_type.go
index 0bed230067e5..80a45718e4b3 100644
--- a/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/alter_table_alter_column_type.go
+++ b/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/alter_table_alter_column_type.go
@@ -9,11 +9,7 @@ import (
"fmt"
"github.com/cockroachdb/cockroach/pkg/build"
- "github.com/cockroachdb/cockroach/pkg/clusterversion"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb"
- "github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo"
- "github.com/cockroachdb/cockroach/pkg/sql/catalog/schemaexpr"
- "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc"
"github.com/cockroachdb/cockroach/pkg/sql/parser"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror"
@@ -24,7 +20,6 @@ import (
"github.com/cockroachdb/cockroach/pkg/sql/sem/cast"
"github.com/cockroachdb/cockroach/pkg/sql/sem/catid"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
- "github.com/cockroachdb/cockroach/pkg/sql/sem/volatility"
"github.com/cockroachdb/cockroach/pkg/sql/sqlerrors"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/errors"
@@ -101,7 +96,7 @@ func alterTableAlterColumnType(
case schemachange.ColumnConversionValidate:
handleValidationOnlyColumnConversion(b, t, col, oldColType, &newColType)
case schemachange.ColumnConversionGeneral:
- handleGeneralColumnConversion(b, stmt, t, tn, tbl, col, oldColType, &newColType)
+ handleGeneralColumnConversion(b, t, col, oldColType, &newColType)
default:
panic(scerrors.NotImplementedErrorf(t,
"alter type conversion %v not handled", kind))
@@ -214,19 +209,12 @@ func handleValidationOnlyColumnConversion(
// to complete the data type conversion.
func handleGeneralColumnConversion(
b BuildCtx,
- stmt tree.Statement,
t *tree.AlterTableAlterColumnType,
- tn *tree.TableName,
- tbl *scpb.Table,
col *scpb.Column,
oldColType, newColType *scpb.ColumnType,
) {
failIfExperimentalSettingNotSet(b, oldColType, newColType)
- // To handle the conversion, we remove the old column and add a new one with
- // the correct type. The new column will temporarily have a computed expression
- // referring to the old column, used only for the backfill process.
- //
// Because we need to rewrite data to change the data type, there are
// additional validation checks required that are incompatible with this
// process.
@@ -240,132 +228,11 @@ func handleGeneralColumnConversion(
panic(sqlerrors.NewAlterColumnTypeColWithConstraintNotSupportedErr())
case *scpb.SecondaryIndex:
panic(sqlerrors.NewAlterColumnTypeColInIndexNotSupportedErr())
- case *scpb.ColumnComputeExpression:
- // TODO(#125844): we currently lose the original computed expression.
- panic(scerrors.NotImplementedErrorf(t,
- "backfilling during ALTER COLUMN TYPE for a column "+
- "with a computed expression is not supported"))
- case *scpb.ColumnOnUpdateExpression, *scpb.ColumnDefaultExpression:
- // TODO(#132909): The use of a temporary compute expression currently
- // blocks altering types with DEFAULT or ON UPDATE expressions. We should
- // be able to add these after the backfill completes and the old column is
- // dropped by using dependency rules.
- panic(scerrors.NotImplementedErrorf(t,
- "backfilling during ALTER COLUMN TYPE for a column "+
- "with a DEFAULT or ON UPDATE expression is not supported"))
}
})
- // We block any attempt to alter the type of a column that is a key column in
- // the primary key. We can't use walkColumnDependencies here, as it doesn't
- // differentiate between key columns and stored columns.
- pk := mustRetrievePrimaryIndex(b, tbl.TableID)
- for _, keyCol := range getIndexColumns(b.QueryByID(tbl.TableID), pk.IndexID, scpb.IndexColumn_KEY) {
- if keyCol.ColumnID == col.ColumnID {
- panic(sqlerrors.NewAlterColumnTypeColInIndexNotSupportedErr())
- }
- }
-
- // TODO(#47137): Only support alter statements that only have a single command.
- switch s := stmt.(type) {
- case *tree.AlterTable:
- if len(s.Cmds) > 1 {
- panic(sqlerrors.NewAlterColTypeInCombinationNotSupportedError())
- }
- }
-
- // In version 25.1, we introduced the necessary dependency rules to ensure the
- // general path works. Without these rules, we encounter failures during the
- // ALTER operation. To avoid this, we revert to legacy handling if not running
- // on version 25.1.
- // TODO(25.1): Update V24_3 here once V25_1 is defined.
- if !b.EvalCtx().Settings.Version.ActiveVersion(b).IsActive(clusterversion.V24_3) {
- panic(scerrors.NotImplementedErrorf(t,
- "old active version; ALTER COLUMN TYPE requires backfill. Reverting to legacy handling"))
- }
-
- // TODO(#132936): Not yet supported in the DSC. Throwing an error to trigger
- // fallback to legacy.
- if newColType.Type.Family() == types.EnumFamily {
- panic(scerrors.NotImplementedErrorf(t,
- "backfilling during ALTER COLUMN TYPE for an enum column "+
- "type is not supported"))
- }
-
- // Generate the ID of the new column we are adding.
- newColID := b.NextTableColumnID(tbl)
- newColType.ColumnID = newColID
-
- // Create a computed expression for the new column that references the old column.
- //
- // During the backfill process to populate the new column, the old column is still
- // referenced by its original name, so we use that in the expression.
- colName := mustRetrieveColumnName(b, tbl.TableID, col.ColumnID)
- expr, err := getComputeExpressionForBackfill(b, t, tn, tbl.TableID, colName.Name, newColType)
- if err != nil {
- panic(err)
- }
-
- // First set the target status of the old column to drop. We will replace this
- // column with a new column. This column stays visible until the second backfill.
- b.Drop(col)
- b.Drop(colName)
- b.Drop(oldColType)
- handleDropColumnPrimaryIndexes(b, tbl, col)
-
- // Add the spec for the new column. It will be identical to the column it is replacing,
- // except the type will differ, and it will have a transient computed expression.
- // This expression will reference the original column to facilitate the backfill.
- // This column becomes visible after the first backfill.
- spec := addColumnSpec{
- tbl: tbl,
- col: &scpb.Column{
- TableID: tbl.TableID,
- ColumnID: newColID,
- IsHidden: col.IsHidden,
- IsInaccessible: col.IsInaccessible,
- IsSystemColumn: col.IsSystemColumn,
- PgAttributeNum: getPgAttributeNum(col),
- },
- name: &scpb.ColumnName{
- TableID: tbl.TableID,
- ColumnID: newColID,
- Name: colName.Name,
- },
- colType: newColType,
- compute: &scpb.ColumnComputeExpression{
- TableID: tbl.TableID,
- ColumnID: newColID,
- Expression: *b.WrapExpression(tbl.TableID, expr),
- },
- transientCompute: true,
- notNull: retrieveColumnNotNull(b, tbl.TableID, col.ColumnID) != nil,
- // TODO(#133040): The new column will be placed in the same column family as the
- // one it's replacing, so there's no need to specify a family. However, the new
- // column will be added to the end of the family's column ID list, which changes
- // its internal ordering. This needs to be revisited as CDC may have a dependency
- // on the same ordering (see TestEventColumnOrderingWithSchemaChanges).
- fam: nil,
- }
- addColumn(b, spec, t)
-
- // The above operation will cause a backfill to occur twice. Once with both columns,
- // then another time with the old column removed. Since both columns will exist at
- // the same time for a short period of time, we need to rename the old column so that
- // we can access either one. We add this name as a transient so that it is cleaned up
- // prior to the old column being totally removed.
- nameExists := func(name string) bool {
- return getColumnIDFromColumnName(b, tbl.TableID, tree.Name(name), false /* required */) != 0
- }
- oldColumnRename := tabledesc.GenerateUniqueName(fmt.Sprintf("%s_shadow", colName.Name), nameExists)
- b.AddTransient(&scpb.ColumnName{
- TableID: tbl.TableID,
- ColumnID: col.ColumnID,
- Name: oldColumnRename,
- // If we don't complete the operation, the column won't be dropped, so we
- // need to remember the original name to preserve it.
- AbsentName: colName.Name,
- })
+ // TODO(spilchen): Implement the general conversion logic in #127014
+ panic(scerrors.NotImplementedErrorf(t, "general alter type conversion not supported in the declarative schema changer"))
}
func updateColumnType(b BuildCtx, oldColType, newColType *scpb.ColumnType) {
@@ -428,43 +295,3 @@ func maybeWriteNoticeForFKColTypeMismatch(b BuildCtx, col *scpb.Column, colType
}
})
}
-
-func getComputeExpressionForBackfill(
- b BuildCtx,
- t *tree.AlterTableAlterColumnType,
- tn *tree.TableName,
- tableID catid.DescID,
- colName string,
- newColType *scpb.ColumnType,
-) (expr tree.Expr, err error) {
- // If a USING clause wasn't specified, the default expression is casting the column to the new type.
- if t.Using == nil {
- return parser.ParseExpr(fmt.Sprintf("%s::%s", colName, newColType.Type.SQLString()))
- }
-
- expr, err = parser.ParseExpr(t.Using.String())
- if err != nil {
- return
- }
-
- _, _, _, err = schemaexpr.DequalifyAndValidateExprImpl(b, expr, newColType.Type,
- tree.AlterColumnTypeUsingExpr, b.SemaCtx(), volatility.Volatile, tn, b.ClusterSettings().Version.ActiveVersion(b),
- func() colinfo.ResultColumns {
- return getNonDropResultColumns(b, tableID)
- },
- func(columnName tree.Name) (exists bool, accessible bool, id catid.ColumnID, typ *types.T) {
- return columnLookupFn(b, tableID, columnName)
- },
- )
- return
-}
-
-// getPgAttributeNum returns the column's ordering value as stored in the catalog.
-// This ensures the column keeps its position for 'SELECT *' queries when replacing
-// an old column with a new one.
-func getPgAttributeNum(col *scpb.Column) catid.PGAttributeNum {
- if col.PgAttributeNum != 0 {
- return col.PgAttributeNum
- }
- return catid.PGAttributeNum(col.ColumnID)
-}
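// With the backfill path removed, handleGeneralColumnConversion now ends in
// scerrors.NotImplementedErrorf, which in this package is the signal to
// decline a statement: the builder recovers the panic and the statement
// falls back to the legacy schema-change path. The declining pattern, as a
// sketch (the supported parameter is illustrative):
func buildGeneralConversion(b BuildCtx, t *tree.AlterTableAlterColumnType, supported bool) {
	if !supported {
		// Recovered by the builder; triggers legacy-schema-changer fallback.
		panic(scerrors.NotImplementedErrorf(t, "general alter type conversion not supported"))
	}
	// ... declarative build steps would go here ...
}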
diff --git a/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/alter_table_drop_column.go b/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/alter_table_drop_column.go
index 20d2d931700a..84384e565688 100644
--- a/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/alter_table_drop_column.go
+++ b/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/alter_table_drop_column.go
@@ -265,7 +265,7 @@ func dropColumn(
}
})
if _, _, ct := scpb.FindColumnType(colElts); !ct.IsVirtual {
- handleDropColumnPrimaryIndexes(b, tbl, col)
+ handleDropColumnPrimaryIndexes(b, tbl, n, col)
}
assertAllColumnElementsAreDropped(colElts)
}
@@ -435,7 +435,9 @@ func panicIfColReferencedInPredicate(
}
}
-func handleDropColumnPrimaryIndexes(b BuildCtx, tbl *scpb.Table, col *scpb.Column) {
+func handleDropColumnPrimaryIndexes(
+ b BuildCtx, tbl *scpb.Table, n tree.NodeFormatter, col *scpb.Column,
+) {
inflatedChain := getInflatedPrimaryIndexChain(b, tbl.TableID)
// If `col` is already public in `old`, then we just need to drop it from `final`.
diff --git a/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/helpers.go b/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/helpers.go
index 943ef83a35ef..5246a968811e 100644
--- a/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/helpers.go
+++ b/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/helpers.go
@@ -1754,26 +1754,6 @@ func mustRetrieveIndexNameElem(
}).MustGetOneElement()
}
-func mustRetrieveColumnName(
- b BuildCtx, tableID catid.DescID, columnID catid.ColumnID,
-) *scpb.ColumnName {
- return b.QueryByID(tableID).FilterColumnName().
- Filter(func(_ scpb.Status, _ scpb.TargetStatus, e *scpb.ColumnName) bool { return e.ColumnID == columnID }).
- MustGetOneElement()
-}
-
-func mustRetrievePrimaryIndex(b BuildCtx, tableID catid.DescID) *scpb.PrimaryIndex {
- return b.QueryByID(tableID).FilterPrimaryIndex().MustGetOneElement()
-}
-
-func retrieveColumnNotNull(
- b BuildCtx, tableID catid.DescID, columnID catid.ColumnID,
-) *scpb.ColumnNotNull {
- return b.QueryByID(tableID).FilterColumnNotNull().
- Filter(func(_ scpb.Status, _ scpb.TargetStatus, e *scpb.ColumnNotNull) bool { return e.ColumnID == columnID }).
- MustGetZeroOrOneElement()
-}
-
// mustRetrievePartitioningFromIndexPartitioning retrieves the partitioning
// from the index partitioning element associated with the given tableID
// and indexID.
diff --git a/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/process.go b/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/process.go
index 869fcb373cf0..a1fcf977c337 100644
--- a/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/process.go
+++ b/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/process.go
@@ -63,7 +63,7 @@ var supportedStatements = map[reflect.Type]supportedStatement{
reflect.TypeOf((*tree.CreateSchema)(nil)): {fn: CreateSchema, statementTags: []string{tree.CreateSchemaTag}, on: true, checks: nil},
reflect.TypeOf((*tree.CreateSequence)(nil)): {fn: CreateSequence, statementTags: []string{tree.CreateSequenceTag}, on: true, checks: isV241Active},
reflect.TypeOf((*tree.CreateDatabase)(nil)): {fn: CreateDatabase, statementTags: []string{tree.CreateDatabaseTag}, on: true, checks: isV241Active},
- reflect.TypeOf((*tree.SetZoneConfig)(nil)): {fn: SetZoneConfig, statementTags: []string{tree.ConfigureZoneTag}, on: false, checks: isV242Active},
+ reflect.TypeOf((*tree.SetZoneConfig)(nil)): {fn: SetZoneConfig, statementTags: []string{tree.ConfigureZoneTag}, on: false, checks: isV243Active},
reflect.TypeOf((*tree.CreateTrigger)(nil)): {fn: CreateTrigger, statementTags: []string{tree.CreateTriggerTag}, on: true, checks: isV243Active},
reflect.TypeOf((*tree.DropTrigger)(nil)): {fn: DropTrigger, statementTags: []string{tree.DropTriggerTag}, on: true, checks: isV243Active},
}
diff --git a/pkg/sql/schemachanger/scbuild/testdata/alter_table_alter_column_type b/pkg/sql/schemachanger/scbuild/testdata/alter_table_alter_column_type
index dee6871b50e9..ff3801b75804 100644
--- a/pkg/sql/schemachanger/scbuild/testdata/alter_table_alter_column_type
+++ b/pkg/sql/schemachanger/scbuild/testdata/alter_table_alter_column_type
@@ -27,85 +27,3 @@ ALTER TABLE t ALTER COLUMN c2 SET DATA TYPE CHAR(5)
{columnId: 2, elementCreationMetadata: {in231OrLater: true, in243OrLater: true}, isNullable: true, tableId: 104, type: {family: StringFamily, oid: 1042, visibleType: 8, width: 5}, typeName: CHAR(5)}
- [[CheckConstraint:{DescID: 104, IndexID: 1, ConstraintID: 2, ReferencedColumnIDs: [2]}, TRANSIENT_ABSENT], ABSENT]
{columnIds: [2], constraintId: 2, expr: (CAST(CAST(c2 AS CHAR(5)) AS CHAR(10)) = c2), indexIdForValidation: 1, referencedColumnIds: [2], tableId: 104}
-
-build
-ALTER TABLE t ALTER COLUMN c2 SET DATA TYPE BIGINT USING c2::BIGINT
-----
-- [[Column:{DescID: 104, ColumnID: 2}, ABSENT], PUBLIC]
- {columnId: 2, tableId: 104}
-- [[ColumnName:{DescID: 104, Name: c2, ColumnID: 2}, ABSENT], PUBLIC]
- {columnId: 2, name: c2, tableId: 104}
-- [[ColumnType:{DescID: 104, ColumnFamilyID: 0, ColumnID: 2, TypeName: CHAR(10)}, ABSENT], PUBLIC]
- {columnId: 2, elementCreationMetadata: {in231OrLater: true, in243OrLater: true}, isNullable: true, tableId: 104, type: {family: StringFamily, oid: 1042, visibleType: 8, width: 10}, typeName: CHAR(10)}
-- [[IndexColumn:{DescID: 104, ColumnID: 3, IndexID: 1}, ABSENT], PUBLIC]
- {columnId: 3, indexId: 1, tableId: 104}
-- [[IndexColumn:{DescID: 104, ColumnID: 1, IndexID: 1}, ABSENT], PUBLIC]
- {columnId: 1, indexId: 1, kind: STORED, tableId: 104}
-- [[IndexColumn:{DescID: 104, ColumnID: 2, IndexID: 1}, ABSENT], PUBLIC]
- {columnId: 2, indexId: 1, kind: STORED, ordinalInKind: 1, tableId: 104}
-- [[PrimaryIndex:{DescID: 104, IndexID: 1, ConstraintID: 1}, ABSENT], PUBLIC]
- {constraintId: 1, indexId: 1, isUnique: true, tableId: 104}
-- [[IndexName:{DescID: 104, Name: t_pkey, IndexID: 1}, ABSENT], PUBLIC]
- {indexId: 1, name: t_pkey, tableId: 104}
-- [[IndexData:{DescID: 104, IndexID: 1}, ABSENT], PUBLIC]
- {indexId: 1, tableId: 104}
-- [[TableData:{DescID: 104, ReferencedDescID: 100}, PUBLIC], PUBLIC]
- {databaseId: 100, tableId: 104}
-- [[PrimaryIndex:{DescID: 104, IndexID: 2, ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1}, TRANSIENT_ABSENT], ABSENT]
- {constraintId: 2, indexId: 2, isUnique: true, sourceIndexId: 1, tableId: 104, temporaryIndexId: 3}
-- [[IndexName:{DescID: 104, Name: t_pkey, IndexID: 2}, TRANSIENT_ABSENT], ABSENT]
- {indexId: 2, name: t_pkey, tableId: 104}
-- [[IndexColumn:{DescID: 104, ColumnID: 3, IndexID: 2}, TRANSIENT_ABSENT], ABSENT]
- {columnId: 3, indexId: 2, tableId: 104}
-- [[IndexColumn:{DescID: 104, ColumnID: 1, IndexID: 2}, TRANSIENT_ABSENT], ABSENT]
- {columnId: 1, indexId: 2, kind: STORED, tableId: 104}
-- [[IndexColumn:{DescID: 104, ColumnID: 2, IndexID: 2}, TRANSIENT_ABSENT], ABSENT]
- {columnId: 2, indexId: 2, kind: STORED, ordinalInKind: 1, tableId: 104}
-- [[IndexData:{DescID: 104, IndexID: 2}, TRANSIENT_ABSENT], ABSENT]
- {indexId: 2, tableId: 104}
-- [[TemporaryIndex:{DescID: 104, IndexID: 3, ConstraintID: 3, SourceIndexID: 1}, TRANSIENT_ABSENT], ABSENT]
- {constraintId: 3, indexId: 3, isUnique: true, sourceIndexId: 1, tableId: 104}
-- [[IndexColumn:{DescID: 104, ColumnID: 3, IndexID: 3}, TRANSIENT_ABSENT], ABSENT]
- {columnId: 3, indexId: 3, tableId: 104}
-- [[IndexColumn:{DescID: 104, ColumnID: 1, IndexID: 3}, TRANSIENT_ABSENT], ABSENT]
- {columnId: 1, indexId: 3, kind: STORED, tableId: 104}
-- [[IndexColumn:{DescID: 104, ColumnID: 2, IndexID: 3}, TRANSIENT_ABSENT], ABSENT]
- {columnId: 2, indexId: 3, kind: STORED, ordinalInKind: 1, tableId: 104}
-- [[IndexData:{DescID: 104, IndexID: 3}, TRANSIENT_ABSENT], ABSENT]
- {indexId: 3, tableId: 104}
-- [[PrimaryIndex:{DescID: 104, IndexID: 4, ConstraintID: 4, TemporaryIndexID: 5, SourceIndexID: 2}, PUBLIC], ABSENT]
- {constraintId: 4, indexId: 4, isUnique: true, sourceIndexId: 2, tableId: 104, temporaryIndexId: 5}
-- [[IndexName:{DescID: 104, Name: t_pkey, IndexID: 4}, PUBLIC], ABSENT]
- {indexId: 4, name: t_pkey, tableId: 104}
-- [[IndexColumn:{DescID: 104, ColumnID: 3, IndexID: 4}, PUBLIC], ABSENT]
- {columnId: 3, indexId: 4, tableId: 104}
-- [[IndexColumn:{DescID: 104, ColumnID: 1, IndexID: 4}, PUBLIC], ABSENT]
- {columnId: 1, indexId: 4, kind: STORED, tableId: 104}
-- [[IndexData:{DescID: 104, IndexID: 4}, PUBLIC], ABSENT]
- {indexId: 4, tableId: 104}
-- [[TemporaryIndex:{DescID: 104, IndexID: 5, ConstraintID: 5, SourceIndexID: 2}, TRANSIENT_ABSENT], ABSENT]
- {constraintId: 5, indexId: 5, isUnique: true, sourceIndexId: 2, tableId: 104}
-- [[IndexColumn:{DescID: 104, ColumnID: 3, IndexID: 5}, TRANSIENT_ABSENT], ABSENT]
- {columnId: 3, indexId: 5, tableId: 104}
-- [[IndexColumn:{DescID: 104, ColumnID: 1, IndexID: 5}, TRANSIENT_ABSENT], ABSENT]
- {columnId: 1, indexId: 5, kind: STORED, tableId: 104}
-- [[IndexData:{DescID: 104, IndexID: 5}, TRANSIENT_ABSENT], ABSENT]
- {indexId: 5, tableId: 104}
-- [[Column:{DescID: 104, ColumnID: 4}, PUBLIC], ABSENT]
- {columnId: 4, pgAttributeNum: 2, tableId: 104}
-- [[ColumnName:{DescID: 104, Name: c2, ColumnID: 4}, PUBLIC], ABSENT]
- {columnId: 4, name: c2, tableId: 104}
-- [[ColumnType:{DescID: 104, ColumnFamilyID: 0, ColumnID: 4, TypeName: INT8}, PUBLIC], ABSENT]
- {columnId: 4, elementCreationMetadata: {in231OrLater: true, in243OrLater: true}, isNullable: true, tableId: 104, type: {family: IntFamily, oid: 20, width: 64}, typeName: INT8}
-- [[ColumnComputeExpression:{DescID: 104, ColumnID: 4}, TRANSIENT_ABSENT], ABSENT]
- {columnId: 4, expr: 'c2::INT8', referencedColumnIds: [2], tableId: 104}
-- [[IndexColumn:{DescID: 104, ColumnID: 4, IndexID: 2}, TRANSIENT_ABSENT], ABSENT]
- {columnId: 4, indexId: 2, kind: STORED, ordinalInKind: 2, tableId: 104}
-- [[IndexColumn:{DescID: 104, ColumnID: 4, IndexID: 3}, TRANSIENT_ABSENT], ABSENT]
- {columnId: 4, indexId: 3, kind: STORED, ordinalInKind: 2, tableId: 104}
-- [[IndexColumn:{DescID: 104, ColumnID: 4, IndexID: 4}, PUBLIC], ABSENT]
- {columnId: 4, indexId: 4, kind: STORED, ordinalInKind: 1, tableId: 104}
-- [[IndexColumn:{DescID: 104, ColumnID: 4, IndexID: 5}, TRANSIENT_ABSENT], ABSENT]
- {columnId: 4, indexId: 5, kind: STORED, ordinalInKind: 1, tableId: 104}
-- [[ColumnName:{DescID: 104, Name: c2_shadow, ColumnID: 2}, TRANSIENT_ABSENT], ABSENT]
- {absentName: c2, columnId: 2, name: c2_shadow, tableId: 104}
diff --git a/pkg/sql/schemachanger/scdecomp/testdata/other b/pkg/sql/schemachanger/scdecomp/testdata/other
index e65fbc50d1e8..b810a7385ea8 100644
--- a/pkg/sql/schemachanger/scdecomp/testdata/other
+++ b/pkg/sql/schemachanger/scdecomp/testdata/other
@@ -285,31 +285,26 @@ ElementState:
tableId: 112
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 1
name: id
tableId: 112
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 4.294967292e+09
name: crdb_internal_origin_timestamp
tableId: 112
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 4.294967293e+09
name: crdb_internal_origin_id
tableId: 112
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 4.294967294e+09
name: tableoid
tableId: 112
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 4.294967295e+09
name: crdb_internal_mvcc_timestamp
tableId: 112
@@ -565,37 +560,31 @@ ElementState:
tableId: 113
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 1
name: k
tableId: 113
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 2
name: id
tableId: 113
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 4.294967292e+09
name: crdb_internal_origin_timestamp
tableId: 113
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 4.294967293e+09
name: crdb_internal_origin_id
tableId: 113
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 4.294967294e+09
name: tableoid
tableId: 113
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 4.294967295e+09
name: crdb_internal_mvcc_timestamp
tableId: 113
diff --git a/pkg/sql/schemachanger/scdecomp/testdata/sequence b/pkg/sql/schemachanger/scdecomp/testdata/sequence
index 251cf2a423cd..960bcc39b71f 100644
--- a/pkg/sql/schemachanger/scdecomp/testdata/sequence
+++ b/pkg/sql/schemachanger/scdecomp/testdata/sequence
@@ -186,37 +186,31 @@ ElementState:
tableId: 105
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 1
name: id
tableId: 105
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 2
name: cexpr
tableId: 105
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 4.294967292e+09
name: crdb_internal_origin_timestamp
tableId: 105
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 4.294967293e+09
name: crdb_internal_origin_id
tableId: 105
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 4.294967294e+09
name: tableoid
tableId: 105
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 4.294967295e+09
name: crdb_internal_mvcc_timestamp
tableId: 105
@@ -585,37 +579,31 @@ ElementState:
tableId: 105
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 1
name: id
tableId: 105
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 2
name: cexpr
tableId: 105
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 4.294967292e+09
name: crdb_internal_origin_timestamp
tableId: 105
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 4.294967293e+09
name: crdb_internal_origin_id
tableId: 105
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 4.294967294e+09
name: tableoid
tableId: 105
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 4.294967295e+09
name: crdb_internal_mvcc_timestamp
tableId: 105
diff --git a/pkg/sql/schemachanger/scdecomp/testdata/table b/pkg/sql/schemachanger/scdecomp/testdata/table
index fd6724117cda..be206e0d812a 100644
--- a/pkg/sql/schemachanger/scdecomp/testdata/table
+++ b/pkg/sql/schemachanger/scdecomp/testdata/table
@@ -85,31 +85,26 @@ ElementState:
tableId: 104
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 1
name: id
tableId: 104
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 4.294967292e+09
name: crdb_internal_origin_timestamp
tableId: 104
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 4.294967293e+09
name: crdb_internal_origin_id
tableId: 104
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 4.294967294e+09
name: tableoid
tableId: 104
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 4.294967295e+09
name: crdb_internal_mvcc_timestamp
tableId: 104
@@ -461,43 +456,36 @@ ElementState:
tableId: 105
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 1
name: id
tableId: 105
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 2
name: name
tableId: 105
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 3
name: price
tableId: 105
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 4.294967292e+09
name: crdb_internal_origin_timestamp
tableId: 105
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 4.294967293e+09
name: crdb_internal_origin_id
tableId: 105
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 4.294967294e+09
name: tableoid
tableId: 105
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 4.294967295e+09
name: crdb_internal_mvcc_timestamp
tableId: 105
@@ -1060,37 +1048,31 @@ ElementState:
tableId: 104
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 1
name: id
tableId: 104
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 2
name: j
tableId: 104
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 4.294967292e+09
name: crdb_internal_origin_timestamp
tableId: 104
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 4.294967293e+09
name: crdb_internal_origin_id
tableId: 104
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 4.294967294e+09
name: tableoid
tableId: 104
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 4.294967295e+09
name: crdb_internal_mvcc_timestamp
tableId: 104
@@ -1449,37 +1431,31 @@ ElementState:
tableId: 109
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 1
name: v
tableId: 109
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 2
name: rowid
tableId: 109
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 4.294967292e+09
name: crdb_internal_origin_timestamp
tableId: 109
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 4.294967293e+09
name: crdb_internal_origin_id
tableId: 109
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 4.294967294e+09
name: tableoid
tableId: 109
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 4.294967295e+09
name: crdb_internal_mvcc_timestamp
tableId: 109
diff --git a/pkg/sql/schemachanger/scdecomp/testdata/type b/pkg/sql/schemachanger/scdecomp/testdata/type
index 479e755744bb..5fba6236dd41 100644
--- a/pkg/sql/schemachanger/scdecomp/testdata/type
+++ b/pkg/sql/schemachanger/scdecomp/testdata/type
@@ -207,55 +207,46 @@ ElementState:
tableId: 108
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 1
name: id
tableId: 108
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 2
name: g
tableId: 108
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 3
name: s
tableId: 108
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 4
name: other
tableId: 108
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 4.294967292e+09
name: crdb_internal_origin_timestamp
tableId: 108
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 4.294967293e+09
name: crdb_internal_origin_id
tableId: 108
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 4.294967294e+09
name: tableoid
tableId: 108
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 4.294967295e+09
name: crdb_internal_mvcc_timestamp
tableId: 108
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 5
name: name
tableId: 108
@@ -1000,67 +991,56 @@ ElementState:
tableId: 111
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 1
name: id
tableId: 111
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 2
name: c
tableId: 111
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 3
name: cs
tableId: 111
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 4
name: cstored
tableId: 111
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 4.294967292e+09
name: crdb_internal_origin_timestamp
tableId: 111
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 4.294967293e+09
name: crdb_internal_origin_id
tableId: 111
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 4.294967294e+09
name: tableoid
tableId: 111
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 4.294967295e+09
name: crdb_internal_mvcc_timestamp
tableId: 111
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 5
name: cvirtual
tableId: 111
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 6
name: name
tableId: 111
Status: PUBLIC
- ColumnName:
- absentName: ""
columnId: 7
name: crdb_internal_idx_expr
tableId: 111
diff --git a/pkg/sql/schemachanger/scdeps/sctestutils/sctestutils.go b/pkg/sql/schemachanger/scdeps/sctestutils/sctestutils.go
index 76ec7ffe5219..696b2329696c 100644
--- a/pkg/sql/schemachanger/scdeps/sctestutils/sctestutils.go
+++ b/pkg/sql/schemachanger/scdeps/sctestutils/sctestutils.go
@@ -84,7 +84,6 @@ func WithBuilderDependenciesFromTestServer(
// changer will allow non-fully implemented operations.
planner.SessionData().NewSchemaChangerMode = sessiondatapb.UseNewSchemaChangerUnsafe
planner.SessionData().EnableUniqueWithoutIndexConstraints = true
- planner.SessionData().AlterColumnTypeGeneralEnabled = true
fn(scdeps.NewBuilderDependencies(
execCfg.NodeInfo.LogicalClusterID(),
execCfg.Codec,
diff --git a/pkg/sql/schemachanger/scexec/scmutationexec/column.go b/pkg/sql/schemachanger/scexec/scmutationexec/column.go
index db32f0531a1c..eed3c96709fd 100644
--- a/pkg/sql/schemachanger/scexec/scmutationexec/column.go
+++ b/pkg/sql/schemachanger/scexec/scmutationexec/column.go
@@ -433,17 +433,11 @@ func (i *immediateVisitor) updateExistingColumnType(
}
func clearComputedExpr(col *descpb.ColumnDescriptor) {
- // This operation zeros out the computed column expression to remove references
- // to sequences or other dependencies, but it can't always remove the expression entirely.
- //
- // For virtual computed columns, removing the expression would turn the column
- // into a virtual non-computed column, which doesn't make sense. When dropping
- // the expression for a column that still exists (e.g., a stored column), we do
- // want to remove the expression.
- if col.Virtual {
- null := tree.Serialize(tree.DNull)
- col.ComputeExpr = &null
- } else {
- col.ComputeExpr = nil
- }
+ // This operation needs to zero out the computed column expression to remove
+ // any references to sequences and the like, but it can't simply remove the
+ // expression entirely; otherwise, in the case of virtual computed columns,
+ // the column descriptor would be interpreted as a virtual non-computed
+ // column, which doesn't make sense.
+ null := tree.Serialize(tree.DNull)
+ col.ComputeExpr = &null
}
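// The restored behavior keeps *some* compute expression on the descriptor:
// tree.Serialize(tree.DNull) renders "NULL", severing any sequence
// references while leaving the column well-formed. Minimal shape:
func clearComputedExprSketch(col *descpb.ColumnDescriptor) {
	null := tree.Serialize(tree.DNull) // renders as "NULL"
	col.ComputeExpr = &null            // dependencies gone, expression remains
}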
diff --git a/pkg/sql/schemachanger/scpb/elements.proto b/pkg/sql/schemachanger/scpb/elements.proto
index 8d30ddcb1856..34320c62f9f5 100644
--- a/pkg/sql/schemachanger/scpb/elements.proto
+++ b/pkg/sql/schemachanger/scpb/elements.proto
@@ -614,9 +614,6 @@ message ColumnName {
uint32 table_id = 1 [(gogoproto.customname) = "TableID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.DescID"];
uint32 column_id = 2 [(gogoproto.customname) = "ColumnID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.ColumnID"];
string name = 3;
- // AbsentName is the name to use when ColumnName transitions to ABSENT. If this
- // is omitted, a placeholder name is used.
- string absent_name = 4;
}
message IndexName {
diff --git a/pkg/sql/schemachanger/scpb/uml/table.puml b/pkg/sql/schemachanger/scpb/uml/table.puml
index aeda746a612d..2594f2ecb44b 100644
--- a/pkg/sql/schemachanger/scpb/uml/table.puml
+++ b/pkg/sql/schemachanger/scpb/uml/table.puml
@@ -61,7 +61,6 @@ object ColumnName
ColumnName : TableID
ColumnName : ColumnID
ColumnName : Name
-ColumnName : AbsentName
object ColumnNotNull
diff --git a/pkg/sql/schemachanger/scplan/BUILD.bazel b/pkg/sql/schemachanger/scplan/BUILD.bazel
index 8a5f1622d3a4..ea214c4729f0 100644
--- a/pkg/sql/schemachanger/scplan/BUILD.bazel
+++ b/pkg/sql/schemachanger/scplan/BUILD.bazel
@@ -19,7 +19,6 @@ go_library(
"//pkg/sql/schemachanger/scplan/internal/rules/current",
"//pkg/sql/schemachanger/scplan/internal/rules/release_24_1",
"//pkg/sql/schemachanger/scplan/internal/rules/release_24_2",
- "//pkg/sql/schemachanger/scplan/internal/rules/release_24_3",
"//pkg/sql/schemachanger/scplan/internal/scgraph",
"//pkg/sql/schemachanger/scplan/internal/scgraphviz",
"//pkg/sql/schemachanger/scplan/internal/scstage",
diff --git a/pkg/sql/schemachanger/scplan/internal/opgen/opgen_column_name.go b/pkg/sql/schemachanger/scplan/internal/opgen/opgen_column_name.go
index 1284606efd1a..ba81500ca94d 100644
--- a/pkg/sql/schemachanger/scplan/internal/opgen/opgen_column_name.go
+++ b/pkg/sql/schemachanger/scplan/internal/opgen/opgen_column_name.go
@@ -26,21 +26,15 @@ func init() {
}),
),
),
- toTransientAbsentLikePublic(),
toAbsent(
scpb.Status_PUBLIC,
to(scpb.Status_ABSENT,
emit(func(this *scpb.ColumnName) *scop.SetColumnName {
- op := &scop.SetColumnName{
+ return &scop.SetColumnName{
TableID: this.TableID,
ColumnID: this.ColumnID,
Name: tabledesc.ColumnNamePlaceholder(this.ColumnID),
}
- // If a name was provided for the transition to absent, override the placeholder.
- if this.AbsentName != "" {
- op.Name = this.AbsentName
- }
- return op
}),
),
),
diff --git a/pkg/sql/schemachanger/scplan/internal/rules/current/dep_alter_column_type.go b/pkg/sql/schemachanger/scplan/internal/rules/current/dep_alter_column_type.go
index d04283055c6e..0476d4f95aae 100644
--- a/pkg/sql/schemachanger/scplan/internal/rules/current/dep_alter_column_type.go
+++ b/pkg/sql/schemachanger/scplan/internal/rules/current/dep_alter_column_type.go
@@ -52,21 +52,4 @@ func init() {
}
},
)
-
- registerDepRule(
- "adding a transient column compute expression moves to 'absent' after PK validation to ensures it's there for the backfill",
- scgraph.Precedence,
- "primary-index", "transient-column-compute",
- func(from, to NodeVars) rel.Clauses {
- return rel.Clauses{
- from.Type((*scpb.PrimaryIndex)(nil)),
- to.Type((*scpb.ColumnComputeExpression)(nil)),
- JoinOnDescID(from, to, "table-id"),
- from.TargetStatus(scpb.ToPublic),
- to.TargetStatus(scpb.Transient),
- from.CurrentStatus(scpb.Status_VALIDATED),
- to.CurrentStatus(scpb.Status_TRANSIENT_ABSENT),
- }
- },
- )
}
diff --git a/pkg/sql/schemachanger/scplan/internal/rules/current/helpers.go b/pkg/sql/schemachanger/scplan/internal/rules/current/helpers.go
index 129436ce38b8..207a7f55a569 100644
--- a/pkg/sql/schemachanger/scplan/internal/rules/current/helpers.go
+++ b/pkg/sql/schemachanger/scplan/internal/rules/current/helpers.go
@@ -16,12 +16,10 @@ import (
const (
// rulesVersion version of elements that can be appended to rel rule names.
- rulesVersion = "-25.1"
+ rulesVersion = "-24.3"
)
// rulesVersionKey version of elements used by this rule set.
-// TODO(annie): Need to update the rulesVersionKey here to point to
-// clusterversion.V25_1 when that is available.
var rulesVersionKey = clusterversion.V24_3
// descriptorIsNotBeingDropped creates a clause which leads to the outer clause
diff --git a/pkg/sql/schemachanger/scplan/internal/rules/current/testdata/deprules b/pkg/sql/schemachanger/scplan/internal/rules/current/testdata/deprules
index 360d6d3ff773..bfcbdf09838c 100644
--- a/pkg/sql/schemachanger/scplan/internal/rules/current/testdata/deprules
+++ b/pkg/sql/schemachanger/scplan/internal/rules/current/testdata/deprules
@@ -13,12 +13,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = PUBLIC
- $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'CheckConstraint transitions to ABSENT uphold 2-version invariant: TRANSIENT_ABSENT->ABSENT'
@@ -34,12 +34,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = TRANSIENT_ABSENT
- $next-Node[CurrentStatus] = ABSENT
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'CheckConstraint transitions to ABSENT uphold 2-version invariant: TRANSIENT_VALIDATED->VALIDATED'
@@ -55,12 +55,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = TRANSIENT_VALIDATED
- $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'CheckConstraint transitions to ABSENT uphold 2-version invariant: TRANSIENT_WRITE_ONLY->VALIDATED'
@@ -76,12 +76,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = TRANSIENT_WRITE_ONLY
- $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'CheckConstraint transitions to ABSENT uphold 2-version invariant: VALIDATED->ABSENT'
@@ -97,12 +97,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = VALIDATED
- $next-Node[CurrentStatus] = ABSENT
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- nodeNotExistsWithStatusIn_TRANSIENT_VALIDATED_WRITE_ONLY_TRANSIENT_WRITE_ONLY($prev-Target)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
@@ -119,12 +119,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = WRITE_ONLY
- $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'CheckConstraint transitions to PUBLIC uphold 2-version invariant: ABSENT->WRITE_ONLY'
@@ -140,12 +140,12 @@ deprules
- $prev-Target[TargetStatus] = PUBLIC
- $prev-Node[CurrentStatus] = ABSENT
- $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'CheckConstraint transitions to PUBLIC uphold 2-version invariant: VALIDATED->PUBLIC'
@@ -161,12 +161,12 @@ deprules
- $prev-Target[TargetStatus] = PUBLIC
- $prev-Node[CurrentStatus] = VALIDATED
- $next-Node[CurrentStatus] = PUBLIC
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'CheckConstraint transitions to PUBLIC uphold 2-version invariant: WRITE_ONLY->VALIDATED'
@@ -182,12 +182,12 @@ deprules
- $prev-Target[TargetStatus] = PUBLIC
- $prev-Node[CurrentStatus] = WRITE_ONLY
- $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'CheckConstraint transitions to TRANSIENT_ABSENT uphold 2-version invariant: ABSENT->WRITE_ONLY'
@@ -203,12 +203,12 @@ deprules
- $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- $prev-Node[CurrentStatus] = ABSENT
- $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'CheckConstraint transitions to TRANSIENT_ABSENT uphold 2-version invariant: PUBLIC->TRANSIENT_VALIDATED'
@@ -224,12 +224,12 @@ deprules
- $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- $prev-Node[CurrentStatus] = PUBLIC
- $next-Node[CurrentStatus] = TRANSIENT_VALIDATED
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'CheckConstraint transitions to TRANSIENT_ABSENT uphold 2-version invariant: TRANSIENT_VALIDATED->TRANSIENT_ABSENT'
@@ -245,12 +245,12 @@ deprules
- $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- $prev-Node[CurrentStatus] = TRANSIENT_VALIDATED
- $next-Node[CurrentStatus] = TRANSIENT_ABSENT
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- nodeNotExistsWithStatusIn_TRANSIENT_WRITE_ONLY($prev-Target)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
@@ -267,12 +267,12 @@ deprules
- $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- $prev-Node[CurrentStatus] = TRANSIENT_WRITE_ONLY
- $next-Node[CurrentStatus] = TRANSIENT_VALIDATED
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'CheckConstraint transitions to TRANSIENT_ABSENT uphold 2-version invariant: VALIDATED->PUBLIC'
@@ -288,12 +288,12 @@ deprules
- $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- $prev-Node[CurrentStatus] = VALIDATED
- $next-Node[CurrentStatus] = PUBLIC
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'CheckConstraint transitions to TRANSIENT_ABSENT uphold 2-version invariant: WRITE_ONLY->VALIDATED'
@@ -309,12 +309,12 @@ deprules
- $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- $prev-Node[CurrentStatus] = WRITE_ONLY
- $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'Column transitions to ABSENT uphold 2-version invariant: DELETE_ONLY->ABSENT'
@@ -330,12 +330,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = DELETE_ONLY
- $next-Node[CurrentStatus] = ABSENT
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'Column transitions to ABSENT uphold 2-version invariant: PUBLIC->WRITE_ONLY'
@@ -351,12 +351,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = PUBLIC
- $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'Column transitions to ABSENT uphold 2-version invariant: WRITE_ONLY->DELETE_ONLY'
@@ -372,12 +372,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = WRITE_ONLY
- $next-Node[CurrentStatus] = DELETE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'Column transitions to PUBLIC uphold 2-version invariant: ABSENT->DELETE_ONLY'
@@ -393,12 +393,12 @@ deprules
- $prev-Target[TargetStatus] = PUBLIC
- $prev-Node[CurrentStatus] = ABSENT
- $next-Node[CurrentStatus] = DELETE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'Column transitions to PUBLIC uphold 2-version invariant: DELETE_ONLY->WRITE_ONLY'
@@ -414,12 +414,12 @@ deprules
- $prev-Target[TargetStatus] = PUBLIC
- $prev-Node[CurrentStatus] = DELETE_ONLY
- $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'Column transitions to PUBLIC uphold 2-version invariant: WRITE_ONLY->PUBLIC'
@@ -435,12 +435,12 @@ deprules
- $prev-Target[TargetStatus] = PUBLIC
- $prev-Node[CurrentStatus] = WRITE_ONLY
- $next-Node[CurrentStatus] = PUBLIC
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'ColumnNotNull transitions to ABSENT uphold 2-version invariant: PUBLIC->VALIDATED'
@@ -456,12 +456,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = PUBLIC
- $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'ColumnNotNull transitions to ABSENT uphold 2-version invariant: VALIDATED->ABSENT'
@@ -477,12 +477,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = VALIDATED
- $next-Node[CurrentStatus] = ABSENT
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- nodeNotExistsWithStatusIn_WRITE_ONLY($prev-Target)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
@@ -499,12 +499,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = WRITE_ONLY
- $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'ColumnNotNull transitions to PUBLIC uphold 2-version invariant: ABSENT->WRITE_ONLY'
@@ -520,12 +520,12 @@ deprules
- $prev-Target[TargetStatus] = PUBLIC
- $prev-Node[CurrentStatus] = ABSENT
- $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'ColumnNotNull transitions to PUBLIC uphold 2-version invariant: VALIDATED->PUBLIC'
@@ -541,12 +541,12 @@ deprules
- $prev-Target[TargetStatus] = PUBLIC
- $prev-Node[CurrentStatus] = VALIDATED
- $next-Node[CurrentStatus] = PUBLIC
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'ColumnNotNull transitions to PUBLIC uphold 2-version invariant: WRITE_ONLY->VALIDATED'
@@ -562,12 +562,12 @@ deprules
- $prev-Target[TargetStatus] = PUBLIC
- $prev-Node[CurrentStatus] = WRITE_ONLY
- $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: Computed column expression is dropped before the column it depends on
@@ -762,12 +762,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = PUBLIC
- $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'ForeignKeyConstraint transitions to ABSENT uphold 2-version invariant: VALIDATED->ABSENT'
@@ -783,12 +783,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = VALIDATED
- $next-Node[CurrentStatus] = ABSENT
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- nodeNotExistsWithStatusIn_WRITE_ONLY($prev-Target)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
@@ -805,12 +805,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = WRITE_ONLY
- $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'ForeignKeyConstraint transitions to PUBLIC uphold 2-version invariant: ABSENT->WRITE_ONLY'
@@ -826,12 +826,12 @@ deprules
- $prev-Target[TargetStatus] = PUBLIC
- $prev-Node[CurrentStatus] = ABSENT
- $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'ForeignKeyConstraint transitions to PUBLIC uphold 2-version invariant: VALIDATED->PUBLIC'
@@ -847,12 +847,12 @@ deprules
- $prev-Target[TargetStatus] = PUBLIC
- $prev-Node[CurrentStatus] = VALIDATED
- $next-Node[CurrentStatus] = PUBLIC
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'ForeignKeyConstraint transitions to PUBLIC uphold 2-version invariant: WRITE_ONLY->VALIDATED'
@@ -868,12 +868,12 @@ deprules
- $prev-Target[TargetStatus] = PUBLIC
- $prev-Node[CurrentStatus] = WRITE_ONLY
- $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: New primary index should go public only after columns being dropped move to WRITE_ONLY
@@ -904,12 +904,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = BACKFILLED
- $next-Node[CurrentStatus] = DELETE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'PrimaryIndex transitions to ABSENT uphold 2-version invariant: BACKFILL_ONLY->DELETE_ONLY'
@@ -925,12 +925,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = BACKFILL_ONLY
- $next-Node[CurrentStatus] = DELETE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'PrimaryIndex transitions to ABSENT uphold 2-version invariant: DELETE_ONLY->ABSENT'
@@ -946,12 +946,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = DELETE_ONLY
- $next-Node[CurrentStatus] = ABSENT
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- nodeNotExistsWithStatusIn_TRANSIENT_DELETE_ONLY_BACKFILLED_TRANSIENT_BACKFILLED_BACKFILL_ONLY_TRANSIENT_BACKFILL_ONLY($prev-Target)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
@@ -968,12 +968,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = MERGED
- $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'PrimaryIndex transitions to ABSENT uphold 2-version invariant: MERGE_ONLY->WRITE_ONLY'
@@ -989,12 +989,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = MERGE_ONLY
- $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'PrimaryIndex transitions to ABSENT uphold 2-version invariant: PUBLIC->VALIDATED'
@@ -1010,12 +1010,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = PUBLIC
- $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'PrimaryIndex transitions to ABSENT uphold 2-version invariant: TRANSIENT_ABSENT->ABSENT'
@@ -1031,12 +1031,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = TRANSIENT_ABSENT
- $next-Node[CurrentStatus] = ABSENT
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'PrimaryIndex transitions to ABSENT uphold 2-version invariant: TRANSIENT_BACKFILLED->DELETE_ONLY'
@@ -1052,12 +1052,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = TRANSIENT_BACKFILLED
- $next-Node[CurrentStatus] = DELETE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'PrimaryIndex transitions to ABSENT uphold 2-version invariant: TRANSIENT_BACKFILL_ONLY->DELETE_ONLY'
@@ -1073,12 +1073,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = TRANSIENT_BACKFILL_ONLY
- $next-Node[CurrentStatus] = DELETE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'PrimaryIndex transitions to ABSENT uphold 2-version invariant: TRANSIENT_DELETE_ONLY->DELETE_ONLY'
@@ -1094,12 +1094,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = TRANSIENT_DELETE_ONLY
- $next-Node[CurrentStatus] = DELETE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'PrimaryIndex transitions to ABSENT uphold 2-version invariant: TRANSIENT_MERGED->WRITE_ONLY'
@@ -1115,12 +1115,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = TRANSIENT_MERGED
- $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'PrimaryIndex transitions to ABSENT uphold 2-version invariant: TRANSIENT_MERGE_ONLY->WRITE_ONLY'
@@ -1136,12 +1136,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = TRANSIENT_MERGE_ONLY
- $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'PrimaryIndex transitions to ABSENT uphold 2-version invariant: TRANSIENT_VALIDATED->VALIDATED'
@@ -1157,12 +1157,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = TRANSIENT_VALIDATED
- $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'PrimaryIndex transitions to ABSENT uphold 2-version invariant: TRANSIENT_WRITE_ONLY->WRITE_ONLY'
@@ -1178,12 +1178,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = TRANSIENT_WRITE_ONLY
- $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'PrimaryIndex transitions to ABSENT uphold 2-version invariant: VALIDATED->WRITE_ONLY'
@@ -1199,12 +1199,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = VALIDATED
- $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- nodeNotExistsWithStatusIn_TRANSIENT_VALIDATED($prev-Target)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
@@ -1221,12 +1221,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = WRITE_ONLY
- $next-Node[CurrentStatus] = DELETE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- nodeNotExistsWithStatusIn_VALIDATED_TRANSIENT_WRITE_ONLY_MERGE_ONLY_TRANSIENT_MERGE_ONLY_MERGED_TRANSIENT_MERGED($prev-Target)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
@@ -1243,12 +1243,12 @@ deprules
- $prev-Target[TargetStatus] = PUBLIC
- $prev-Node[CurrentStatus] = ABSENT
- $next-Node[CurrentStatus] = BACKFILL_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'PrimaryIndex transitions to PUBLIC uphold 2-version invariant: BACKFILLED->DELETE_ONLY'
@@ -1264,12 +1264,12 @@ deprules
- $prev-Target[TargetStatus] = PUBLIC
- $prev-Node[CurrentStatus] = BACKFILLED
- $next-Node[CurrentStatus] = DELETE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'PrimaryIndex transitions to PUBLIC uphold 2-version invariant: BACKFILL_ONLY->BACKFILLED'
@@ -1285,12 +1285,12 @@ deprules
- $prev-Target[TargetStatus] = PUBLIC
- $prev-Node[CurrentStatus] = BACKFILL_ONLY
- $next-Node[CurrentStatus] = BACKFILLED
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'PrimaryIndex transitions to PUBLIC uphold 2-version invariant: DELETE_ONLY->MERGE_ONLY'
@@ -1306,12 +1306,12 @@ deprules
- $prev-Target[TargetStatus] = PUBLIC
- $prev-Node[CurrentStatus] = DELETE_ONLY
- $next-Node[CurrentStatus] = MERGE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'PrimaryIndex transitions to PUBLIC uphold 2-version invariant: MERGED->WRITE_ONLY'
@@ -1327,12 +1327,12 @@ deprules
- $prev-Target[TargetStatus] = PUBLIC
- $prev-Node[CurrentStatus] = MERGED
- $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'PrimaryIndex transitions to PUBLIC uphold 2-version invariant: MERGE_ONLY->MERGED'
@@ -1348,12 +1348,12 @@ deprules
- $prev-Target[TargetStatus] = PUBLIC
- $prev-Node[CurrentStatus] = MERGE_ONLY
- $next-Node[CurrentStatus] = MERGED
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'PrimaryIndex transitions to PUBLIC uphold 2-version invariant: VALIDATED->PUBLIC'
@@ -1369,12 +1369,12 @@ deprules
- $prev-Target[TargetStatus] = PUBLIC
- $prev-Node[CurrentStatus] = VALIDATED
- $next-Node[CurrentStatus] = PUBLIC
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'PrimaryIndex transitions to PUBLIC uphold 2-version invariant: WRITE_ONLY->VALIDATED'
@@ -1390,12 +1390,12 @@ deprules
- $prev-Target[TargetStatus] = PUBLIC
- $prev-Node[CurrentStatus] = WRITE_ONLY
- $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'PrimaryIndex transitions to TRANSIENT_ABSENT uphold 2-version invariant: ABSENT->BACKFILL_ONLY'
@@ -1411,12 +1411,12 @@ deprules
- $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- $prev-Node[CurrentStatus] = ABSENT
- $next-Node[CurrentStatus] = BACKFILL_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'PrimaryIndex transitions to TRANSIENT_ABSENT uphold 2-version invariant: BACKFILLED->DELETE_ONLY'
@@ -1432,12 +1432,12 @@ deprules
- $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- $prev-Node[CurrentStatus] = BACKFILLED
- $next-Node[CurrentStatus] = DELETE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'PrimaryIndex transitions to TRANSIENT_ABSENT uphold 2-version invariant: BACKFILL_ONLY->BACKFILLED'
@@ -1453,12 +1453,12 @@ deprules
- $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- $prev-Node[CurrentStatus] = BACKFILL_ONLY
- $next-Node[CurrentStatus] = BACKFILLED
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'PrimaryIndex transitions to TRANSIENT_ABSENT uphold 2-version invariant: DELETE_ONLY->MERGE_ONLY'
@@ -1474,12 +1474,12 @@ deprules
- $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- $prev-Node[CurrentStatus] = DELETE_ONLY
- $next-Node[CurrentStatus] = MERGE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'PrimaryIndex transitions to TRANSIENT_ABSENT uphold 2-version invariant: MERGED->WRITE_ONLY'
@@ -1495,12 +1495,12 @@ deprules
- $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- $prev-Node[CurrentStatus] = MERGED
- $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'PrimaryIndex transitions to TRANSIENT_ABSENT uphold 2-version invariant: MERGE_ONLY->MERGED'
@@ -1516,12 +1516,12 @@ deprules
- $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- $prev-Node[CurrentStatus] = MERGE_ONLY
- $next-Node[CurrentStatus] = MERGED
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'PrimaryIndex transitions to TRANSIENT_ABSENT uphold 2-version invariant: PUBLIC->TRANSIENT_VALIDATED'
@@ -1537,12 +1537,12 @@ deprules
- $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- $prev-Node[CurrentStatus] = PUBLIC
- $next-Node[CurrentStatus] = TRANSIENT_VALIDATED
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'PrimaryIndex transitions to TRANSIENT_ABSENT uphold 2-version invariant: TRANSIENT_BACKFILLED->TRANSIENT_DELETE_ONLY'
@@ -1558,12 +1558,12 @@ deprules
- $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- $prev-Node[CurrentStatus] = TRANSIENT_BACKFILLED
- $next-Node[CurrentStatus] = TRANSIENT_DELETE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'PrimaryIndex transitions to TRANSIENT_ABSENT uphold 2-version invariant: TRANSIENT_BACKFILL_ONLY->TRANSIENT_DELETE_ONLY'
@@ -1579,12 +1579,12 @@ deprules
- $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- $prev-Node[CurrentStatus] = TRANSIENT_BACKFILL_ONLY
- $next-Node[CurrentStatus] = TRANSIENT_DELETE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'PrimaryIndex transitions to TRANSIENT_ABSENT uphold 2-version invariant: TRANSIENT_DELETE_ONLY->TRANSIENT_ABSENT'
@@ -1600,12 +1600,12 @@ deprules
- $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- $prev-Node[CurrentStatus] = TRANSIENT_DELETE_ONLY
- $next-Node[CurrentStatus] = TRANSIENT_ABSENT
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- nodeNotExistsWithStatusIn_TRANSIENT_BACKFILLED_TRANSIENT_BACKFILL_ONLY($prev-Target)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
@@ -1622,12 +1622,12 @@ deprules
- $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- $prev-Node[CurrentStatus] = TRANSIENT_MERGED
- $next-Node[CurrentStatus] = TRANSIENT_WRITE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'PrimaryIndex transitions to TRANSIENT_ABSENT uphold 2-version invariant: TRANSIENT_MERGE_ONLY->TRANSIENT_WRITE_ONLY'
@@ -1643,12 +1643,12 @@ deprules
- $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- $prev-Node[CurrentStatus] = TRANSIENT_MERGE_ONLY
- $next-Node[CurrentStatus] = TRANSIENT_WRITE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'PrimaryIndex transitions to TRANSIENT_ABSENT uphold 2-version invariant: TRANSIENT_VALIDATED->TRANSIENT_WRITE_ONLY'
@@ -1664,12 +1664,12 @@ deprules
- $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- $prev-Node[CurrentStatus] = TRANSIENT_VALIDATED
- $next-Node[CurrentStatus] = TRANSIENT_WRITE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'PrimaryIndex transitions to TRANSIENT_ABSENT uphold 2-version invariant: TRANSIENT_WRITE_ONLY->TRANSIENT_DELETE_ONLY'
@@ -1685,12 +1685,12 @@ deprules
- $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- $prev-Node[CurrentStatus] = TRANSIENT_WRITE_ONLY
- $next-Node[CurrentStatus] = TRANSIENT_DELETE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- nodeNotExistsWithStatusIn_TRANSIENT_VALIDATED_TRANSIENT_MERGE_ONLY_TRANSIENT_MERGED($prev-Target)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
@@ -1707,12 +1707,12 @@ deprules
- $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- $prev-Node[CurrentStatus] = VALIDATED
- $next-Node[CurrentStatus] = PUBLIC
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'PrimaryIndex transitions to TRANSIENT_ABSENT uphold 2-version invariant: WRITE_ONLY->VALIDATED'
@@ -1728,12 +1728,12 @@ deprules
- $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- $prev-Node[CurrentStatus] = WRITE_ONLY
- $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'SecondaryIndex transitions to ABSENT uphold 2-version invariant: BACKFILLED->DELETE_ONLY'
@@ -1749,12 +1749,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = BACKFILLED
- $next-Node[CurrentStatus] = DELETE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'SecondaryIndex transitions to ABSENT uphold 2-version invariant: BACKFILL_ONLY->DELETE_ONLY'
@@ -1770,12 +1770,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = BACKFILL_ONLY
- $next-Node[CurrentStatus] = DELETE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'SecondaryIndex transitions to ABSENT uphold 2-version invariant: DELETE_ONLY->ABSENT'
@@ -1791,12 +1791,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = DELETE_ONLY
- $next-Node[CurrentStatus] = ABSENT
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- nodeNotExistsWithStatusIn_BACKFILLED_BACKFILL_ONLY($prev-Target)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
@@ -1813,12 +1813,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = MERGED
- $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'SecondaryIndex transitions to ABSENT uphold 2-version invariant: MERGE_ONLY->WRITE_ONLY'
@@ -1834,12 +1834,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = MERGE_ONLY
- $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'SecondaryIndex transitions to ABSENT uphold 2-version invariant: PUBLIC->VALIDATED'
@@ -1855,12 +1855,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = PUBLIC
- $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'SecondaryIndex transitions to ABSENT uphold 2-version invariant: VALIDATED->WRITE_ONLY'
@@ -1876,12 +1876,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = VALIDATED
- $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'SecondaryIndex transitions to ABSENT uphold 2-version invariant: WRITE_ONLY->DELETE_ONLY'
@@ -1897,12 +1897,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = WRITE_ONLY
- $next-Node[CurrentStatus] = DELETE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- nodeNotExistsWithStatusIn_VALIDATED_MERGE_ONLY_MERGED($prev-Target)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
@@ -1919,12 +1919,12 @@ deprules
- $prev-Target[TargetStatus] = PUBLIC
- $prev-Node[CurrentStatus] = ABSENT
- $next-Node[CurrentStatus] = BACKFILL_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'SecondaryIndex transitions to PUBLIC uphold 2-version invariant: BACKFILLED->DELETE_ONLY'
@@ -1940,12 +1940,12 @@ deprules
- $prev-Target[TargetStatus] = PUBLIC
- $prev-Node[CurrentStatus] = BACKFILLED
- $next-Node[CurrentStatus] = DELETE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'SecondaryIndex transitions to PUBLIC uphold 2-version invariant: BACKFILL_ONLY->BACKFILLED'
@@ -1961,12 +1961,12 @@ deprules
- $prev-Target[TargetStatus] = PUBLIC
- $prev-Node[CurrentStatus] = BACKFILL_ONLY
- $next-Node[CurrentStatus] = BACKFILLED
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'SecondaryIndex transitions to PUBLIC uphold 2-version invariant: DELETE_ONLY->MERGE_ONLY'
@@ -1982,12 +1982,12 @@ deprules
- $prev-Target[TargetStatus] = PUBLIC
- $prev-Node[CurrentStatus] = DELETE_ONLY
- $next-Node[CurrentStatus] = MERGE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'SecondaryIndex transitions to PUBLIC uphold 2-version invariant: MERGED->WRITE_ONLY'
@@ -2003,12 +2003,12 @@ deprules
- $prev-Target[TargetStatus] = PUBLIC
- $prev-Node[CurrentStatus] = MERGED
- $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'SecondaryIndex transitions to PUBLIC uphold 2-version invariant: MERGE_ONLY->MERGED'
@@ -2024,12 +2024,12 @@ deprules
- $prev-Target[TargetStatus] = PUBLIC
- $prev-Node[CurrentStatus] = MERGE_ONLY
- $next-Node[CurrentStatus] = MERGED
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'SecondaryIndex transitions to PUBLIC uphold 2-version invariant: VALIDATED->PUBLIC'
@@ -2045,12 +2045,12 @@ deprules
- $prev-Target[TargetStatus] = PUBLIC
- $prev-Node[CurrentStatus] = VALIDATED
- $next-Node[CurrentStatus] = PUBLIC
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'SecondaryIndex transitions to PUBLIC uphold 2-version invariant: WRITE_ONLY->VALIDATED'
@@ -2066,12 +2066,12 @@ deprules
- $prev-Target[TargetStatus] = PUBLIC
- $prev-Node[CurrentStatus] = WRITE_ONLY
- $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'TemporaryIndex transitions to ABSENT uphold 2-version invariant: DELETE_ONLY->ABSENT'
@@ -2087,12 +2087,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = DELETE_ONLY
- $next-Node[CurrentStatus] = ABSENT
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- nodeNotExistsWithStatusIn_TRANSIENT_DELETE_ONLY($prev-Target)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
@@ -2109,12 +2109,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = TRANSIENT_ABSENT
- $next-Node[CurrentStatus] = ABSENT
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'TemporaryIndex transitions to ABSENT uphold 2-version invariant: TRANSIENT_DELETE_ONLY->DELETE_ONLY'
@@ -2130,12 +2130,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = TRANSIENT_DELETE_ONLY
- $next-Node[CurrentStatus] = DELETE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'TemporaryIndex transitions to ABSENT uphold 2-version invariant: WRITE_ONLY->DELETE_ONLY'
@@ -2151,12 +2151,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = WRITE_ONLY
- $next-Node[CurrentStatus] = DELETE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'TemporaryIndex transitions to TRANSIENT_ABSENT uphold 2-version invariant: ABSENT->DELETE_ONLY'
@@ -2172,12 +2172,12 @@ deprules
- $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- $prev-Node[CurrentStatus] = ABSENT
- $next-Node[CurrentStatus] = DELETE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'TemporaryIndex transitions to TRANSIENT_ABSENT uphold 2-version invariant: DELETE_ONLY->WRITE_ONLY'
@@ -2193,12 +2193,12 @@ deprules
- $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- $prev-Node[CurrentStatus] = DELETE_ONLY
- $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'TemporaryIndex transitions to TRANSIENT_ABSENT uphold 2-version invariant: TRANSIENT_DELETE_ONLY->TRANSIENT_ABSENT'
@@ -2214,12 +2214,12 @@ deprules
- $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- $prev-Node[CurrentStatus] = TRANSIENT_DELETE_ONLY
- $next-Node[CurrentStatus] = TRANSIENT_ABSENT
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'TemporaryIndex transitions to TRANSIENT_ABSENT uphold 2-version invariant: WRITE_ONLY->TRANSIENT_DELETE_ONLY'
@@ -2235,12 +2235,12 @@ deprules
- $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- $prev-Node[CurrentStatus] = WRITE_ONLY
- $next-Node[CurrentStatus] = TRANSIENT_DELETE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'UniqueWithoutIndexConstraint transitions to ABSENT uphold 2-version invariant: PUBLIC->VALIDATED'
@@ -2256,12 +2256,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = PUBLIC
- $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'UniqueWithoutIndexConstraint transitions to ABSENT uphold 2-version invariant: VALIDATED->ABSENT'
@@ -2277,12 +2277,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = VALIDATED
- $next-Node[CurrentStatus] = ABSENT
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- nodeNotExistsWithStatusIn_WRITE_ONLY($prev-Target)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
@@ -2299,12 +2299,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = WRITE_ONLY
- $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'UniqueWithoutIndexConstraint transitions to PUBLIC uphold 2-version invariant: ABSENT->WRITE_ONLY'
@@ -2320,12 +2320,12 @@ deprules
- $prev-Target[TargetStatus] = PUBLIC
- $prev-Node[CurrentStatus] = ABSENT
- $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'UniqueWithoutIndexConstraint transitions to PUBLIC uphold 2-version invariant: VALIDATED->PUBLIC'
@@ -2341,12 +2341,12 @@ deprules
- $prev-Target[TargetStatus] = PUBLIC
- $prev-Node[CurrentStatus] = VALIDATED
- $next-Node[CurrentStatus] = PUBLIC
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'UniqueWithoutIndexConstraint transitions to PUBLIC uphold 2-version invariant: WRITE_ONLY->VALIDATED'
@@ -2362,28 +2362,14 @@ deprules
- $prev-Target[TargetStatus] = PUBLIC
- $prev-Node[CurrentStatus] = WRITE_ONLY
- $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
-- name: adding a transient column compute expression moves to 'absent' after PK validation to ensures it's there for the backfill
- from: primary-index-Node
- kind: Precedence
- to: transient-column-compute-Node
- query:
- - $primary-index[Type] = '*scpb.PrimaryIndex'
- - $transient-column-compute[Type] = '*scpb.ColumnComputeExpression'
- - joinOnDescID($primary-index, $transient-column-compute, $table-id)
- - $primary-index-Target[TargetStatus] = PUBLIC
- - $transient-column-compute-Target[TargetStatus] = TRANSIENT_ABSENT
- - $primary-index-Node[CurrentStatus] = VALIDATED
- - $transient-column-compute-Node[CurrentStatus] = TRANSIENT_ABSENT
- - joinTargetNode($primary-index, $primary-index-Target, $primary-index-Node)
- - joinTargetNode($transient-column-compute, $transient-column-compute-Target, $transient-column-compute-Node)
- name: all adding indexes reached BACKFILL_ONLY before any of their columns disappear
from: index-Node
kind: Precedence
@@ -2664,7 +2650,7 @@ deprules
to: column-Node
query:
- $column-type[Type] = '*scpb.ColumnType'
- - descriptorIsNotBeingDropped-25.1($column-type)
+ - descriptorIsNotBeingDropped-24.3($column-type)
- $column[Type] = '*scpb.Column'
- joinOnColumnID($column-type, $column, $table-id, $col-id)
- toAbsent($column-type-Target, $column-Target)
@@ -3192,7 +3178,7 @@ deprules
- $referenced-descriptor[Type] IN ['*scpb.AliasType', '*scpb.CompositeType', '*scpb.EnumType']
- $referenced-descriptor[DescID] = $fromDescID
- $referencing-via-type[ReferencedTypeIDs] CONTAINS $fromDescID
- - descriptorIsNotBeingDropped-25.1($referencing-via-type)
+ - descriptorIsNotBeingDropped-24.3($referencing-via-type)
- $referencing-via-type[Type] IN ['*scpb.CheckConstraintUnvalidated', '*scpb.ColumnComputeExpression', '*scpb.ColumnDefaultExpression', '*scpb.ColumnOnUpdateExpression', '*scpb.ColumnType', '*scpb.SecondaryIndexPartial']
- toAbsent($referenced-descriptor-Target, $referencing-via-type-Target)
- $referenced-descriptor-Node[CurrentStatus] = DROPPED
@@ -3614,7 +3600,7 @@ deprules
- $column[Type] = '*scpb.Column'
- ColumnInIndex($index-column, $index, $table-id, $column-id, $index-id)
- joinOnColumnID($index-column, $column, $table-id, $column-id)
- - descriptorIsNotBeingDropped-25.1($index-column)
+ - descriptorIsNotBeingDropped-24.3($index-column)
- toAbsent($index-Target, $column-Target)
- $index-Node[CurrentStatus] = ABSENT
- $column-Node[CurrentStatus] = ABSENT
@@ -3629,7 +3615,7 @@ deprules
- $column[Type] = '*scpb.Column'
- ColumnInIndex($index-column, $index, $table-id, $column-id, $index-id)
- joinOnColumnID($index-column, $column, $table-id, $column-id)
- - descriptorIsNotBeingDropped-25.1($index-column)
+ - descriptorIsNotBeingDropped-24.3($index-column)
- transient($index-Target, $column-Target)
- $index-Node[CurrentStatus] = TRANSIENT_ABSENT
- $column-Node[CurrentStatus] = TRANSIENT_ABSENT
@@ -3644,7 +3630,7 @@ deprules
- $column[Type] = '*scpb.Column'
- ColumnInIndex($index-column, $index, $table-id, $column-id, $index-id)
- joinOnColumnID($index-column, $column, $table-id, $column-id)
- - descriptorIsNotBeingDropped-25.1($index-column)
+ - descriptorIsNotBeingDropped-24.3($index-column)
- $index-Target[TargetStatus] = TRANSIENT_ABSENT
- $index-Node[CurrentStatus] = TRANSIENT_ABSENT
- $column-Target[TargetStatus] = ABSENT
@@ -3660,7 +3646,7 @@ deprules
- $column[Type] = '*scpb.Column'
- ColumnInIndex($index-column, $index, $table-id, $column-id, $index-id)
- joinOnColumnID($index-column, $column, $table-id, $column-id)
- - descriptorIsNotBeingDropped-25.1($index-column)
+ - descriptorIsNotBeingDropped-24.3($index-column)
- $index-Target[TargetStatus] = ABSENT
- $index-Node[CurrentStatus] = ABSENT
- $column-Target[TargetStatus] = TRANSIENT_ABSENT
@@ -3719,7 +3705,7 @@ deprules
to: index-Node
query:
- $partial-predicate[Type] = '*scpb.SecondaryIndexPartial'
- - descriptorIsNotBeingDropped-25.1($partial-predicate)
+ - descriptorIsNotBeingDropped-24.3($partial-predicate)
- $index[Type] = '*scpb.SecondaryIndex'
- joinOnIndexID($partial-predicate, $index, $table-id, $index-id)
- toAbsent($partial-predicate-Target, $index-Target)
@@ -3733,7 +3719,7 @@ deprules
to: index-Node
query:
- $partial-predicate[Type] = '*scpb.SecondaryIndexPartial'
- - descriptorIsNotBeingDropped-25.1($partial-predicate)
+ - descriptorIsNotBeingDropped-24.3($partial-predicate)
- $index[Type] = '*scpb.SecondaryIndex'
- joinOnIndexID($partial-predicate, $index, $table-id, $index-id)
- transient($partial-predicate-Target, $index-Target)
@@ -3747,7 +3733,7 @@ deprules
to: index-Node
query:
- $partial-predicate[Type] = '*scpb.SecondaryIndexPartial'
- - descriptorIsNotBeingDropped-25.1($partial-predicate)
+ - descriptorIsNotBeingDropped-24.3($partial-predicate)
- $index[Type] = '*scpb.SecondaryIndex'
- joinOnIndexID($partial-predicate, $index, $table-id, $index-id)
- $partial-predicate-Target[TargetStatus] = TRANSIENT_ABSENT
@@ -3762,7 +3748,7 @@ deprules
to: index-Node
query:
- $partial-predicate[Type] = '*scpb.SecondaryIndexPartial'
- - descriptorIsNotBeingDropped-25.1($partial-predicate)
+ - descriptorIsNotBeingDropped-24.3($partial-predicate)
- $index[Type] = '*scpb.SecondaryIndex'
- joinOnIndexID($partial-predicate, $index, $table-id, $index-id)
- $partial-predicate-Target[TargetStatus] = ABSENT
@@ -4077,7 +4063,7 @@ deprules
- $secondary-partial-index[Type] = '*scpb.SecondaryIndex'
- $column[Type] = '*scpb.Column'
- joinOnDescID($secondary-partial-index, $column, $table-id)
- - descriptorIsNotBeingDropped-25.1($secondary-partial-index)
+ - descriptorIsNotBeingDropped-24.3($secondary-partial-index)
- secondaryIndexReferencesColumn(*scpb.SecondaryIndex, *scpb.Column)($secondary-partial-index, $column)
- toAbsent($secondary-partial-index-Target, $column-Target)
- $secondary-partial-index-Node[CurrentStatus] = DELETE_ONLY
@@ -4092,7 +4078,7 @@ deprules
- $secondary-partial-index[Type] = '*scpb.SecondaryIndex'
- $column[Type] = '*scpb.Column'
- joinOnDescID($secondary-partial-index, $column, $table-id)
- - descriptorIsNotBeingDropped-25.1($secondary-partial-index)
+ - descriptorIsNotBeingDropped-24.3($secondary-partial-index)
- secondaryIndexReferencesColumn(*scpb.SecondaryIndex, *scpb.Column)($secondary-partial-index, $column)
- transient($secondary-partial-index-Target, $column-Target)
- $secondary-partial-index-Node[CurrentStatus] = TRANSIENT_DELETE_ONLY
@@ -4107,7 +4093,7 @@ deprules
- $secondary-partial-index[Type] = '*scpb.SecondaryIndex'
- $column[Type] = '*scpb.Column'
- joinOnDescID($secondary-partial-index, $column, $table-id)
- - descriptorIsNotBeingDropped-25.1($secondary-partial-index)
+ - descriptorIsNotBeingDropped-24.3($secondary-partial-index)
- secondaryIndexReferencesColumn(*scpb.SecondaryIndex, *scpb.Column)($secondary-partial-index, $column)
- $secondary-partial-index-Target[TargetStatus] = TRANSIENT_ABSENT
- $secondary-partial-index-Node[CurrentStatus] = TRANSIENT_DELETE_ONLY
@@ -4123,7 +4109,7 @@ deprules
- $secondary-partial-index[Type] = '*scpb.SecondaryIndex'
- $column[Type] = '*scpb.Column'
- joinOnDescID($secondary-partial-index, $column, $table-id)
- - descriptorIsNotBeingDropped-25.1($secondary-partial-index)
+ - descriptorIsNotBeingDropped-24.3($secondary-partial-index)
- secondaryIndexReferencesColumn(*scpb.SecondaryIndex, *scpb.Column)($secondary-partial-index, $column)
- $secondary-partial-index-Target[TargetStatus] = ABSENT
- $secondary-partial-index-Node[CurrentStatus] = DELETE_ONLY
@@ -4139,7 +4125,7 @@ deprules
- $secondary-partial-index[Type] = '*scpb.SecondaryIndexPartial'
- $column[Type] = '*scpb.Column'
- joinOnDescID($secondary-partial-index, $column, $table-id)
- - descriptorIsNotBeingDropped-25.1($secondary-partial-index)
+ - descriptorIsNotBeingDropped-24.3($secondary-partial-index)
- secondaryIndexReferencesColumn(*scpb.SecondaryIndexPartial, *scpb.Column)($secondary-partial-index, $column)
- toAbsent($secondary-partial-index-Target, $column-Target)
- $secondary-partial-index-Node[CurrentStatus] = ABSENT
@@ -4154,7 +4140,7 @@ deprules
- $secondary-partial-index[Type] = '*scpb.SecondaryIndexPartial'
- $column[Type] = '*scpb.Column'
- joinOnDescID($secondary-partial-index, $column, $table-id)
- - descriptorIsNotBeingDropped-25.1($secondary-partial-index)
+ - descriptorIsNotBeingDropped-24.3($secondary-partial-index)
- secondaryIndexReferencesColumn(*scpb.SecondaryIndexPartial, *scpb.Column)($secondary-partial-index, $column)
- transient($secondary-partial-index-Target, $column-Target)
- $secondary-partial-index-Node[CurrentStatus] = TRANSIENT_ABSENT
@@ -4169,7 +4155,7 @@ deprules
- $secondary-partial-index[Type] = '*scpb.SecondaryIndexPartial'
- $column[Type] = '*scpb.Column'
- joinOnDescID($secondary-partial-index, $column, $table-id)
- - descriptorIsNotBeingDropped-25.1($secondary-partial-index)
+ - descriptorIsNotBeingDropped-24.3($secondary-partial-index)
- secondaryIndexReferencesColumn(*scpb.SecondaryIndexPartial, *scpb.Column)($secondary-partial-index, $column)
- $secondary-partial-index-Target[TargetStatus] = TRANSIENT_ABSENT
- $secondary-partial-index-Node[CurrentStatus] = TRANSIENT_ABSENT
@@ -4185,7 +4171,7 @@ deprules
- $secondary-partial-index[Type] = '*scpb.SecondaryIndexPartial'
- $column[Type] = '*scpb.Column'
- joinOnDescID($secondary-partial-index, $column, $table-id)
- - descriptorIsNotBeingDropped-25.1($secondary-partial-index)
+ - descriptorIsNotBeingDropped-24.3($secondary-partial-index)
- secondaryIndexReferencesColumn(*scpb.SecondaryIndexPartial, *scpb.Column)($secondary-partial-index, $column)
- $secondary-partial-index-Target[TargetStatus] = ABSENT
- $secondary-partial-index-Node[CurrentStatus] = ABSENT
@@ -4259,7 +4245,7 @@ deprules
- toAbsent($index-Target, $column-Target)
- $index-Node[CurrentStatus] = VALIDATED
- $column-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-25.1($index-column)
+ - descriptorIsNotBeingDropped-24.3($index-column)
- isIndexKeyColumnKey(*scpb.IndexColumn)($index-column)
- joinTargetNode($index, $index-Target, $index-Node)
- joinTargetNode($column, $column-Target, $column-Node)
@@ -4403,12 +4389,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = PUBLIC
- $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'CheckConstraint transitions to ABSENT uphold 2-version invariant: TRANSIENT_ABSENT->ABSENT'
@@ -4424,12 +4410,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = TRANSIENT_ABSENT
- $next-Node[CurrentStatus] = ABSENT
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'CheckConstraint transitions to ABSENT uphold 2-version invariant: TRANSIENT_VALIDATED->VALIDATED'
@@ -4445,12 +4431,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = TRANSIENT_VALIDATED
- $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'CheckConstraint transitions to ABSENT uphold 2-version invariant: TRANSIENT_WRITE_ONLY->VALIDATED'
@@ -4466,12 +4452,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = TRANSIENT_WRITE_ONLY
- $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'CheckConstraint transitions to ABSENT uphold 2-version invariant: VALIDATED->ABSENT'
@@ -4487,12 +4473,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = VALIDATED
- $next-Node[CurrentStatus] = ABSENT
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- nodeNotExistsWithStatusIn_TRANSIENT_VALIDATED_WRITE_ONLY_TRANSIENT_WRITE_ONLY($prev-Target)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
@@ -4509,12 +4495,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = WRITE_ONLY
- $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'CheckConstraint transitions to PUBLIC uphold 2-version invariant: ABSENT->WRITE_ONLY'
@@ -4530,12 +4516,12 @@ deprules
- $prev-Target[TargetStatus] = PUBLIC
- $prev-Node[CurrentStatus] = ABSENT
- $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'CheckConstraint transitions to PUBLIC uphold 2-version invariant: VALIDATED->PUBLIC'
@@ -4551,12 +4537,12 @@ deprules
- $prev-Target[TargetStatus] = PUBLIC
- $prev-Node[CurrentStatus] = VALIDATED
- $next-Node[CurrentStatus] = PUBLIC
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'CheckConstraint transitions to PUBLIC uphold 2-version invariant: WRITE_ONLY->VALIDATED'
@@ -4572,12 +4558,12 @@ deprules
- $prev-Target[TargetStatus] = PUBLIC
- $prev-Node[CurrentStatus] = WRITE_ONLY
- $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'CheckConstraint transitions to TRANSIENT_ABSENT uphold 2-version invariant: ABSENT->WRITE_ONLY'
@@ -4593,12 +4579,12 @@ deprules
- $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- $prev-Node[CurrentStatus] = ABSENT
- $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'CheckConstraint transitions to TRANSIENT_ABSENT uphold 2-version invariant: PUBLIC->TRANSIENT_VALIDATED'
@@ -4614,12 +4600,12 @@ deprules
- $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- $prev-Node[CurrentStatus] = PUBLIC
- $next-Node[CurrentStatus] = TRANSIENT_VALIDATED
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'CheckConstraint transitions to TRANSIENT_ABSENT uphold 2-version invariant: TRANSIENT_VALIDATED->TRANSIENT_ABSENT'
@@ -4635,12 +4621,12 @@ deprules
- $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- $prev-Node[CurrentStatus] = TRANSIENT_VALIDATED
- $next-Node[CurrentStatus] = TRANSIENT_ABSENT
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- nodeNotExistsWithStatusIn_TRANSIENT_WRITE_ONLY($prev-Target)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
@@ -4657,12 +4643,12 @@ deprules
- $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- $prev-Node[CurrentStatus] = TRANSIENT_WRITE_ONLY
- $next-Node[CurrentStatus] = TRANSIENT_VALIDATED
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'CheckConstraint transitions to TRANSIENT_ABSENT uphold 2-version invariant: VALIDATED->PUBLIC'
@@ -4678,12 +4664,12 @@ deprules
- $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- $prev-Node[CurrentStatus] = VALIDATED
- $next-Node[CurrentStatus] = PUBLIC
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'CheckConstraint transitions to TRANSIENT_ABSENT uphold 2-version invariant: WRITE_ONLY->VALIDATED'
@@ -4699,12 +4685,12 @@ deprules
- $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- $prev-Node[CurrentStatus] = WRITE_ONLY
- $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'Column transitions to ABSENT uphold 2-version invariant: DELETE_ONLY->ABSENT'
@@ -4720,12 +4706,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = DELETE_ONLY
- $next-Node[CurrentStatus] = ABSENT
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'Column transitions to ABSENT uphold 2-version invariant: PUBLIC->WRITE_ONLY'
@@ -4741,12 +4727,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = PUBLIC
- $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'Column transitions to ABSENT uphold 2-version invariant: WRITE_ONLY->DELETE_ONLY'
@@ -4762,12 +4748,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = WRITE_ONLY
- $next-Node[CurrentStatus] = DELETE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'Column transitions to PUBLIC uphold 2-version invariant: ABSENT->DELETE_ONLY'
@@ -4783,12 +4769,12 @@ deprules
- $prev-Target[TargetStatus] = PUBLIC
- $prev-Node[CurrentStatus] = ABSENT
- $next-Node[CurrentStatus] = DELETE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'Column transitions to PUBLIC uphold 2-version invariant: DELETE_ONLY->WRITE_ONLY'
@@ -4804,12 +4790,12 @@ deprules
- $prev-Target[TargetStatus] = PUBLIC
- $prev-Node[CurrentStatus] = DELETE_ONLY
- $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'Column transitions to PUBLIC uphold 2-version invariant: WRITE_ONLY->PUBLIC'
@@ -4825,12 +4811,12 @@ deprules
- $prev-Target[TargetStatus] = PUBLIC
- $prev-Node[CurrentStatus] = WRITE_ONLY
- $next-Node[CurrentStatus] = PUBLIC
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'ColumnNotNull transitions to ABSENT uphold 2-version invariant: PUBLIC->VALIDATED'
@@ -4846,12 +4832,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = PUBLIC
- $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'ColumnNotNull transitions to ABSENT uphold 2-version invariant: VALIDATED->ABSENT'
@@ -4867,12 +4853,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = VALIDATED
- $next-Node[CurrentStatus] = ABSENT
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- nodeNotExistsWithStatusIn_WRITE_ONLY($prev-Target)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
@@ -4889,12 +4875,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = WRITE_ONLY
- $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'ColumnNotNull transitions to PUBLIC uphold 2-version invariant: ABSENT->WRITE_ONLY'
@@ -4910,12 +4896,12 @@ deprules
- $prev-Target[TargetStatus] = PUBLIC
- $prev-Node[CurrentStatus] = ABSENT
- $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'ColumnNotNull transitions to PUBLIC uphold 2-version invariant: VALIDATED->PUBLIC'
@@ -4931,12 +4917,12 @@ deprules
- $prev-Target[TargetStatus] = PUBLIC
- $prev-Node[CurrentStatus] = VALIDATED
- $next-Node[CurrentStatus] = PUBLIC
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'ColumnNotNull transitions to PUBLIC uphold 2-version invariant: WRITE_ONLY->VALIDATED'
@@ -4952,12 +4938,12 @@ deprules
- $prev-Target[TargetStatus] = PUBLIC
- $prev-Node[CurrentStatus] = WRITE_ONLY
- $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: Computed column expression is dropped before the column it depends on
@@ -5152,12 +5138,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = PUBLIC
- $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'ForeignKeyConstraint transitions to ABSENT uphold 2-version invariant: VALIDATED->ABSENT'
@@ -5173,12 +5159,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = VALIDATED
- $next-Node[CurrentStatus] = ABSENT
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- nodeNotExistsWithStatusIn_WRITE_ONLY($prev-Target)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
@@ -5195,12 +5181,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = WRITE_ONLY
- $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'ForeignKeyConstraint transitions to PUBLIC uphold 2-version invariant: ABSENT->WRITE_ONLY'
@@ -5216,12 +5202,12 @@ deprules
- $prev-Target[TargetStatus] = PUBLIC
- $prev-Node[CurrentStatus] = ABSENT
- $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'ForeignKeyConstraint transitions to PUBLIC uphold 2-version invariant: VALIDATED->PUBLIC'
@@ -5237,12 +5223,12 @@ deprules
- $prev-Target[TargetStatus] = PUBLIC
- $prev-Node[CurrentStatus] = VALIDATED
- $next-Node[CurrentStatus] = PUBLIC
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'ForeignKeyConstraint transitions to PUBLIC uphold 2-version invariant: WRITE_ONLY->VALIDATED'
@@ -5258,12 +5244,12 @@ deprules
- $prev-Target[TargetStatus] = PUBLIC
- $prev-Node[CurrentStatus] = WRITE_ONLY
- $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: New primary index should go public only after columns being dropped move to WRITE_ONLY
@@ -5294,12 +5280,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = BACKFILLED
- $next-Node[CurrentStatus] = DELETE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'PrimaryIndex transitions to ABSENT uphold 2-version invariant: BACKFILL_ONLY->DELETE_ONLY'
@@ -5315,12 +5301,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = BACKFILL_ONLY
- $next-Node[CurrentStatus] = DELETE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'PrimaryIndex transitions to ABSENT uphold 2-version invariant: DELETE_ONLY->ABSENT'
@@ -5336,12 +5322,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = DELETE_ONLY
- $next-Node[CurrentStatus] = ABSENT
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- nodeNotExistsWithStatusIn_TRANSIENT_DELETE_ONLY_BACKFILLED_TRANSIENT_BACKFILLED_BACKFILL_ONLY_TRANSIENT_BACKFILL_ONLY($prev-Target)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
@@ -5358,12 +5344,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = MERGED
- $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'PrimaryIndex transitions to ABSENT uphold 2-version invariant: MERGE_ONLY->WRITE_ONLY'
@@ -5379,12 +5365,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = MERGE_ONLY
- $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'PrimaryIndex transitions to ABSENT uphold 2-version invariant: PUBLIC->VALIDATED'
@@ -5400,12 +5386,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = PUBLIC
- $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'PrimaryIndex transitions to ABSENT uphold 2-version invariant: TRANSIENT_ABSENT->ABSENT'
@@ -5421,12 +5407,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = TRANSIENT_ABSENT
- $next-Node[CurrentStatus] = ABSENT
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'PrimaryIndex transitions to ABSENT uphold 2-version invariant: TRANSIENT_BACKFILLED->DELETE_ONLY'
@@ -5442,12 +5428,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = TRANSIENT_BACKFILLED
- $next-Node[CurrentStatus] = DELETE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'PrimaryIndex transitions to ABSENT uphold 2-version invariant: TRANSIENT_BACKFILL_ONLY->DELETE_ONLY'
@@ -5463,12 +5449,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = TRANSIENT_BACKFILL_ONLY
- $next-Node[CurrentStatus] = DELETE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'PrimaryIndex transitions to ABSENT uphold 2-version invariant: TRANSIENT_DELETE_ONLY->DELETE_ONLY'
@@ -5484,12 +5470,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = TRANSIENT_DELETE_ONLY
- $next-Node[CurrentStatus] = DELETE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'PrimaryIndex transitions to ABSENT uphold 2-version invariant: TRANSIENT_MERGED->WRITE_ONLY'
@@ -5505,12 +5491,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = TRANSIENT_MERGED
- $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'PrimaryIndex transitions to ABSENT uphold 2-version invariant: TRANSIENT_MERGE_ONLY->WRITE_ONLY'
@@ -5526,12 +5512,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = TRANSIENT_MERGE_ONLY
- $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'PrimaryIndex transitions to ABSENT uphold 2-version invariant: TRANSIENT_VALIDATED->VALIDATED'
@@ -5547,12 +5533,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = TRANSIENT_VALIDATED
- $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'PrimaryIndex transitions to ABSENT uphold 2-version invariant: TRANSIENT_WRITE_ONLY->WRITE_ONLY'
@@ -5568,12 +5554,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = TRANSIENT_WRITE_ONLY
- $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'PrimaryIndex transitions to ABSENT uphold 2-version invariant: VALIDATED->WRITE_ONLY'
@@ -5589,12 +5575,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = VALIDATED
- $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- nodeNotExistsWithStatusIn_TRANSIENT_VALIDATED($prev-Target)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
@@ -5611,12 +5597,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = WRITE_ONLY
- $next-Node[CurrentStatus] = DELETE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- nodeNotExistsWithStatusIn_VALIDATED_TRANSIENT_WRITE_ONLY_MERGE_ONLY_TRANSIENT_MERGE_ONLY_MERGED_TRANSIENT_MERGED($prev-Target)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
@@ -5633,12 +5619,12 @@ deprules
- $prev-Target[TargetStatus] = PUBLIC
- $prev-Node[CurrentStatus] = ABSENT
- $next-Node[CurrentStatus] = BACKFILL_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'PrimaryIndex transitions to PUBLIC uphold 2-version invariant: BACKFILLED->DELETE_ONLY'
@@ -5654,12 +5640,12 @@ deprules
- $prev-Target[TargetStatus] = PUBLIC
- $prev-Node[CurrentStatus] = BACKFILLED
- $next-Node[CurrentStatus] = DELETE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'PrimaryIndex transitions to PUBLIC uphold 2-version invariant: BACKFILL_ONLY->BACKFILLED'
@@ -5675,12 +5661,12 @@ deprules
- $prev-Target[TargetStatus] = PUBLIC
- $prev-Node[CurrentStatus] = BACKFILL_ONLY
- $next-Node[CurrentStatus] = BACKFILLED
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'PrimaryIndex transitions to PUBLIC uphold 2-version invariant: DELETE_ONLY->MERGE_ONLY'
@@ -5696,12 +5682,12 @@ deprules
- $prev-Target[TargetStatus] = PUBLIC
- $prev-Node[CurrentStatus] = DELETE_ONLY
- $next-Node[CurrentStatus] = MERGE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'PrimaryIndex transitions to PUBLIC uphold 2-version invariant: MERGED->WRITE_ONLY'
@@ -5717,12 +5703,12 @@ deprules
- $prev-Target[TargetStatus] = PUBLIC
- $prev-Node[CurrentStatus] = MERGED
- $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'PrimaryIndex transitions to PUBLIC uphold 2-version invariant: MERGE_ONLY->MERGED'
@@ -5738,12 +5724,12 @@ deprules
- $prev-Target[TargetStatus] = PUBLIC
- $prev-Node[CurrentStatus] = MERGE_ONLY
- $next-Node[CurrentStatus] = MERGED
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'PrimaryIndex transitions to PUBLIC uphold 2-version invariant: VALIDATED->PUBLIC'
@@ -5759,12 +5745,12 @@ deprules
- $prev-Target[TargetStatus] = PUBLIC
- $prev-Node[CurrentStatus] = VALIDATED
- $next-Node[CurrentStatus] = PUBLIC
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'PrimaryIndex transitions to PUBLIC uphold 2-version invariant: WRITE_ONLY->VALIDATED'
@@ -5780,12 +5766,12 @@ deprules
- $prev-Target[TargetStatus] = PUBLIC
- $prev-Node[CurrentStatus] = WRITE_ONLY
- $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'PrimaryIndex transitions to TRANSIENT_ABSENT uphold 2-version invariant: ABSENT->BACKFILL_ONLY'
@@ -5801,12 +5787,12 @@ deprules
- $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- $prev-Node[CurrentStatus] = ABSENT
- $next-Node[CurrentStatus] = BACKFILL_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'PrimaryIndex transitions to TRANSIENT_ABSENT uphold 2-version invariant: BACKFILLED->DELETE_ONLY'
@@ -5822,12 +5808,12 @@ deprules
- $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- $prev-Node[CurrentStatus] = BACKFILLED
- $next-Node[CurrentStatus] = DELETE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'PrimaryIndex transitions to TRANSIENT_ABSENT uphold 2-version invariant: BACKFILL_ONLY->BACKFILLED'
@@ -5843,12 +5829,12 @@ deprules
- $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- $prev-Node[CurrentStatus] = BACKFILL_ONLY
- $next-Node[CurrentStatus] = BACKFILLED
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'PrimaryIndex transitions to TRANSIENT_ABSENT uphold 2-version invariant: DELETE_ONLY->MERGE_ONLY'
@@ -5864,12 +5850,12 @@ deprules
- $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- $prev-Node[CurrentStatus] = DELETE_ONLY
- $next-Node[CurrentStatus] = MERGE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'PrimaryIndex transitions to TRANSIENT_ABSENT uphold 2-version invariant: MERGED->WRITE_ONLY'
@@ -5885,12 +5871,12 @@ deprules
- $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- $prev-Node[CurrentStatus] = MERGED
- $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'PrimaryIndex transitions to TRANSIENT_ABSENT uphold 2-version invariant: MERGE_ONLY->MERGED'
@@ -5906,12 +5892,12 @@ deprules
- $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- $prev-Node[CurrentStatus] = MERGE_ONLY
- $next-Node[CurrentStatus] = MERGED
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'PrimaryIndex transitions to TRANSIENT_ABSENT uphold 2-version invariant: PUBLIC->TRANSIENT_VALIDATED'
@@ -5927,12 +5913,12 @@ deprules
- $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- $prev-Node[CurrentStatus] = PUBLIC
- $next-Node[CurrentStatus] = TRANSIENT_VALIDATED
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'PrimaryIndex transitions to TRANSIENT_ABSENT uphold 2-version invariant: TRANSIENT_BACKFILLED->TRANSIENT_DELETE_ONLY'
@@ -5948,12 +5934,12 @@ deprules
- $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- $prev-Node[CurrentStatus] = TRANSIENT_BACKFILLED
- $next-Node[CurrentStatus] = TRANSIENT_DELETE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'PrimaryIndex transitions to TRANSIENT_ABSENT uphold 2-version invariant: TRANSIENT_BACKFILL_ONLY->TRANSIENT_DELETE_ONLY'
@@ -5969,12 +5955,12 @@ deprules
- $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- $prev-Node[CurrentStatus] = TRANSIENT_BACKFILL_ONLY
- $next-Node[CurrentStatus] = TRANSIENT_DELETE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'PrimaryIndex transitions to TRANSIENT_ABSENT uphold 2-version invariant: TRANSIENT_DELETE_ONLY->TRANSIENT_ABSENT'
@@ -5990,12 +5976,12 @@ deprules
- $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- $prev-Node[CurrentStatus] = TRANSIENT_DELETE_ONLY
- $next-Node[CurrentStatus] = TRANSIENT_ABSENT
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- nodeNotExistsWithStatusIn_TRANSIENT_BACKFILLED_TRANSIENT_BACKFILL_ONLY($prev-Target)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
@@ -6012,12 +5998,12 @@ deprules
- $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- $prev-Node[CurrentStatus] = TRANSIENT_MERGED
- $next-Node[CurrentStatus] = TRANSIENT_WRITE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'PrimaryIndex transitions to TRANSIENT_ABSENT uphold 2-version invariant: TRANSIENT_MERGE_ONLY->TRANSIENT_WRITE_ONLY'
@@ -6033,12 +6019,12 @@ deprules
- $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- $prev-Node[CurrentStatus] = TRANSIENT_MERGE_ONLY
- $next-Node[CurrentStatus] = TRANSIENT_WRITE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'PrimaryIndex transitions to TRANSIENT_ABSENT uphold 2-version invariant: TRANSIENT_VALIDATED->TRANSIENT_WRITE_ONLY'
@@ -6054,12 +6040,12 @@ deprules
- $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- $prev-Node[CurrentStatus] = TRANSIENT_VALIDATED
- $next-Node[CurrentStatus] = TRANSIENT_WRITE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'PrimaryIndex transitions to TRANSIENT_ABSENT uphold 2-version invariant: TRANSIENT_WRITE_ONLY->TRANSIENT_DELETE_ONLY'
@@ -6075,12 +6061,12 @@ deprules
- $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- $prev-Node[CurrentStatus] = TRANSIENT_WRITE_ONLY
- $next-Node[CurrentStatus] = TRANSIENT_DELETE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- nodeNotExistsWithStatusIn_TRANSIENT_VALIDATED_TRANSIENT_MERGE_ONLY_TRANSIENT_MERGED($prev-Target)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
@@ -6097,12 +6083,12 @@ deprules
- $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- $prev-Node[CurrentStatus] = VALIDATED
- $next-Node[CurrentStatus] = PUBLIC
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'PrimaryIndex transitions to TRANSIENT_ABSENT uphold 2-version invariant: WRITE_ONLY->VALIDATED'
@@ -6118,12 +6104,12 @@ deprules
- $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- $prev-Node[CurrentStatus] = WRITE_ONLY
- $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'SecondaryIndex transitions to ABSENT uphold 2-version invariant: BACKFILLED->DELETE_ONLY'
@@ -6139,12 +6125,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = BACKFILLED
- $next-Node[CurrentStatus] = DELETE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'SecondaryIndex transitions to ABSENT uphold 2-version invariant: BACKFILL_ONLY->DELETE_ONLY'
@@ -6160,12 +6146,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = BACKFILL_ONLY
- $next-Node[CurrentStatus] = DELETE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'SecondaryIndex transitions to ABSENT uphold 2-version invariant: DELETE_ONLY->ABSENT'
@@ -6181,12 +6167,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = DELETE_ONLY
- $next-Node[CurrentStatus] = ABSENT
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- nodeNotExistsWithStatusIn_BACKFILLED_BACKFILL_ONLY($prev-Target)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
@@ -6203,12 +6189,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = MERGED
- $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'SecondaryIndex transitions to ABSENT uphold 2-version invariant: MERGE_ONLY->WRITE_ONLY'
@@ -6224,12 +6210,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = MERGE_ONLY
- $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'SecondaryIndex transitions to ABSENT uphold 2-version invariant: PUBLIC->VALIDATED'
@@ -6245,12 +6231,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = PUBLIC
- $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'SecondaryIndex transitions to ABSENT uphold 2-version invariant: VALIDATED->WRITE_ONLY'
@@ -6266,12 +6252,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = VALIDATED
- $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'SecondaryIndex transitions to ABSENT uphold 2-version invariant: WRITE_ONLY->DELETE_ONLY'
@@ -6287,12 +6273,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = WRITE_ONLY
- $next-Node[CurrentStatus] = DELETE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- nodeNotExistsWithStatusIn_VALIDATED_MERGE_ONLY_MERGED($prev-Target)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
@@ -6309,12 +6295,12 @@ deprules
- $prev-Target[TargetStatus] = PUBLIC
- $prev-Node[CurrentStatus] = ABSENT
- $next-Node[CurrentStatus] = BACKFILL_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'SecondaryIndex transitions to PUBLIC uphold 2-version invariant: BACKFILLED->DELETE_ONLY'
@@ -6330,12 +6316,12 @@ deprules
- $prev-Target[TargetStatus] = PUBLIC
- $prev-Node[CurrentStatus] = BACKFILLED
- $next-Node[CurrentStatus] = DELETE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'SecondaryIndex transitions to PUBLIC uphold 2-version invariant: BACKFILL_ONLY->BACKFILLED'
@@ -6351,12 +6337,12 @@ deprules
- $prev-Target[TargetStatus] = PUBLIC
- $prev-Node[CurrentStatus] = BACKFILL_ONLY
- $next-Node[CurrentStatus] = BACKFILLED
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'SecondaryIndex transitions to PUBLIC uphold 2-version invariant: DELETE_ONLY->MERGE_ONLY'
@@ -6372,12 +6358,12 @@ deprules
- $prev-Target[TargetStatus] = PUBLIC
- $prev-Node[CurrentStatus] = DELETE_ONLY
- $next-Node[CurrentStatus] = MERGE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'SecondaryIndex transitions to PUBLIC uphold 2-version invariant: MERGED->WRITE_ONLY'
@@ -6393,12 +6379,12 @@ deprules
- $prev-Target[TargetStatus] = PUBLIC
- $prev-Node[CurrentStatus] = MERGED
- $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'SecondaryIndex transitions to PUBLIC uphold 2-version invariant: MERGE_ONLY->MERGED'
@@ -6414,12 +6400,12 @@ deprules
- $prev-Target[TargetStatus] = PUBLIC
- $prev-Node[CurrentStatus] = MERGE_ONLY
- $next-Node[CurrentStatus] = MERGED
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'SecondaryIndex transitions to PUBLIC uphold 2-version invariant: VALIDATED->PUBLIC'
@@ -6435,12 +6421,12 @@ deprules
- $prev-Target[TargetStatus] = PUBLIC
- $prev-Node[CurrentStatus] = VALIDATED
- $next-Node[CurrentStatus] = PUBLIC
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'SecondaryIndex transitions to PUBLIC uphold 2-version invariant: WRITE_ONLY->VALIDATED'
@@ -6456,12 +6442,12 @@ deprules
- $prev-Target[TargetStatus] = PUBLIC
- $prev-Node[CurrentStatus] = WRITE_ONLY
- $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'TemporaryIndex transitions to ABSENT uphold 2-version invariant: DELETE_ONLY->ABSENT'
@@ -6477,12 +6463,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = DELETE_ONLY
- $next-Node[CurrentStatus] = ABSENT
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- nodeNotExistsWithStatusIn_TRANSIENT_DELETE_ONLY($prev-Target)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
@@ -6499,12 +6485,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = TRANSIENT_ABSENT
- $next-Node[CurrentStatus] = ABSENT
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'TemporaryIndex transitions to ABSENT uphold 2-version invariant: TRANSIENT_DELETE_ONLY->DELETE_ONLY'
@@ -6520,12 +6506,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = TRANSIENT_DELETE_ONLY
- $next-Node[CurrentStatus] = DELETE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'TemporaryIndex transitions to ABSENT uphold 2-version invariant: WRITE_ONLY->DELETE_ONLY'
@@ -6541,12 +6527,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = WRITE_ONLY
- $next-Node[CurrentStatus] = DELETE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'TemporaryIndex transitions to TRANSIENT_ABSENT uphold 2-version invariant: ABSENT->DELETE_ONLY'
@@ -6562,12 +6548,12 @@ deprules
- $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- $prev-Node[CurrentStatus] = ABSENT
- $next-Node[CurrentStatus] = DELETE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'TemporaryIndex transitions to TRANSIENT_ABSENT uphold 2-version invariant: DELETE_ONLY->WRITE_ONLY'
@@ -6583,12 +6569,12 @@ deprules
- $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- $prev-Node[CurrentStatus] = DELETE_ONLY
- $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'TemporaryIndex transitions to TRANSIENT_ABSENT uphold 2-version invariant: TRANSIENT_DELETE_ONLY->TRANSIENT_ABSENT'
@@ -6604,12 +6590,12 @@ deprules
- $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- $prev-Node[CurrentStatus] = TRANSIENT_DELETE_ONLY
- $next-Node[CurrentStatus] = TRANSIENT_ABSENT
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'TemporaryIndex transitions to TRANSIENT_ABSENT uphold 2-version invariant: WRITE_ONLY->TRANSIENT_DELETE_ONLY'
@@ -6625,12 +6611,12 @@ deprules
- $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- $prev-Node[CurrentStatus] = WRITE_ONLY
- $next-Node[CurrentStatus] = TRANSIENT_DELETE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'UniqueWithoutIndexConstraint transitions to ABSENT uphold 2-version invariant: PUBLIC->VALIDATED'
@@ -6646,12 +6632,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = PUBLIC
- $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'UniqueWithoutIndexConstraint transitions to ABSENT uphold 2-version invariant: VALIDATED->ABSENT'
@@ -6667,12 +6653,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = VALIDATED
- $next-Node[CurrentStatus] = ABSENT
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- nodeNotExistsWithStatusIn_WRITE_ONLY($prev-Target)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
@@ -6689,12 +6675,12 @@ deprules
- $prev-Target[TargetStatus] = ABSENT
- $prev-Node[CurrentStatus] = WRITE_ONLY
- $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'UniqueWithoutIndexConstraint transitions to PUBLIC uphold 2-version invariant: ABSENT->WRITE_ONLY'
@@ -6710,12 +6696,12 @@ deprules
- $prev-Target[TargetStatus] = PUBLIC
- $prev-Node[CurrentStatus] = ABSENT
- $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'UniqueWithoutIndexConstraint transitions to PUBLIC uphold 2-version invariant: VALIDATED->PUBLIC'
@@ -6731,12 +6717,12 @@ deprules
- $prev-Target[TargetStatus] = PUBLIC
- $prev-Node[CurrentStatus] = VALIDATED
- $next-Node[CurrentStatus] = PUBLIC
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
- name: 'UniqueWithoutIndexConstraint transitions to PUBLIC uphold 2-version invariant: WRITE_ONLY->VALIDATED'
@@ -6752,28 +6738,14 @@ deprules
- $prev-Target[TargetStatus] = PUBLIC
- $prev-Node[CurrentStatus] = WRITE_ONLY
- $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-25.1($prev)
+ - descriptorIsNotBeingDropped-24.3($prev)
- $descriptor-data[Type] = '*scpb.TableData'
- joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- $descriptor-data-Node[CurrentStatus] = PUBLIC
- $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-25.1($descID)
+ - descriptorIsDataNotBeingAdded-24.3($descID)
- joinTargetNode($prev, $prev-Target, $prev-Node)
- joinTargetNode($next, $next-Target, $next-Node)
-- name: adding a transient column compute expression moves to 'absent' after PK validation to ensures it's there for the backfill
- from: primary-index-Node
- kind: Precedence
- to: transient-column-compute-Node
- query:
- - $primary-index[Type] = '*scpb.PrimaryIndex'
- - $transient-column-compute[Type] = '*scpb.ColumnComputeExpression'
- - joinOnDescID($primary-index, $transient-column-compute, $table-id)
- - $primary-index-Target[TargetStatus] = PUBLIC
- - $transient-column-compute-Target[TargetStatus] = TRANSIENT_ABSENT
- - $primary-index-Node[CurrentStatus] = VALIDATED
- - $transient-column-compute-Node[CurrentStatus] = TRANSIENT_ABSENT
- - joinTargetNode($primary-index, $primary-index-Target, $primary-index-Node)
- - joinTargetNode($transient-column-compute, $transient-column-compute-Target, $transient-column-compute-Node)
- name: all adding indexes reached BACKFILL_ONLY before any of their columns disappear
from: index-Node
kind: Precedence
@@ -7054,7 +7026,7 @@ deprules
to: column-Node
query:
- $column-type[Type] = '*scpb.ColumnType'
- - descriptorIsNotBeingDropped-25.1($column-type)
+ - descriptorIsNotBeingDropped-24.3($column-type)
- $column[Type] = '*scpb.Column'
- joinOnColumnID($column-type, $column, $table-id, $col-id)
- toAbsent($column-type-Target, $column-Target)
@@ -7582,7 +7554,7 @@ deprules
- $referenced-descriptor[Type] IN ['*scpb.AliasType', '*scpb.CompositeType', '*scpb.EnumType']
- $referenced-descriptor[DescID] = $fromDescID
- $referencing-via-type[ReferencedTypeIDs] CONTAINS $fromDescID
- - descriptorIsNotBeingDropped-25.1($referencing-via-type)
+ - descriptorIsNotBeingDropped-24.3($referencing-via-type)
- $referencing-via-type[Type] IN ['*scpb.CheckConstraintUnvalidated', '*scpb.ColumnComputeExpression', '*scpb.ColumnDefaultExpression', '*scpb.ColumnOnUpdateExpression', '*scpb.ColumnType', '*scpb.SecondaryIndexPartial']
- toAbsent($referenced-descriptor-Target, $referencing-via-type-Target)
- $referenced-descriptor-Node[CurrentStatus] = DROPPED
@@ -8004,7 +7976,7 @@ deprules
- $column[Type] = '*scpb.Column'
- ColumnInIndex($index-column, $index, $table-id, $column-id, $index-id)
- joinOnColumnID($index-column, $column, $table-id, $column-id)
- - descriptorIsNotBeingDropped-25.1($index-column)
+ - descriptorIsNotBeingDropped-24.3($index-column)
- toAbsent($index-Target, $column-Target)
- $index-Node[CurrentStatus] = ABSENT
- $column-Node[CurrentStatus] = ABSENT
@@ -8019,7 +7991,7 @@ deprules
- $column[Type] = '*scpb.Column'
- ColumnInIndex($index-column, $index, $table-id, $column-id, $index-id)
- joinOnColumnID($index-column, $column, $table-id, $column-id)
- - descriptorIsNotBeingDropped-25.1($index-column)
+ - descriptorIsNotBeingDropped-24.3($index-column)
- transient($index-Target, $column-Target)
- $index-Node[CurrentStatus] = TRANSIENT_ABSENT
- $column-Node[CurrentStatus] = TRANSIENT_ABSENT
@@ -8034,7 +8006,7 @@ deprules
- $column[Type] = '*scpb.Column'
- ColumnInIndex($index-column, $index, $table-id, $column-id, $index-id)
- joinOnColumnID($index-column, $column, $table-id, $column-id)
- - descriptorIsNotBeingDropped-25.1($index-column)
+ - descriptorIsNotBeingDropped-24.3($index-column)
- $index-Target[TargetStatus] = TRANSIENT_ABSENT
- $index-Node[CurrentStatus] = TRANSIENT_ABSENT
- $column-Target[TargetStatus] = ABSENT
@@ -8050,7 +8022,7 @@ deprules
- $column[Type] = '*scpb.Column'
- ColumnInIndex($index-column, $index, $table-id, $column-id, $index-id)
- joinOnColumnID($index-column, $column, $table-id, $column-id)
- - descriptorIsNotBeingDropped-25.1($index-column)
+ - descriptorIsNotBeingDropped-24.3($index-column)
- $index-Target[TargetStatus] = ABSENT
- $index-Node[CurrentStatus] = ABSENT
- $column-Target[TargetStatus] = TRANSIENT_ABSENT
@@ -8109,7 +8081,7 @@ deprules
to: index-Node
query:
- $partial-predicate[Type] = '*scpb.SecondaryIndexPartial'
- - descriptorIsNotBeingDropped-25.1($partial-predicate)
+ - descriptorIsNotBeingDropped-24.3($partial-predicate)
- $index[Type] = '*scpb.SecondaryIndex'
- joinOnIndexID($partial-predicate, $index, $table-id, $index-id)
- toAbsent($partial-predicate-Target, $index-Target)
@@ -8123,7 +8095,7 @@ deprules
to: index-Node
query:
- $partial-predicate[Type] = '*scpb.SecondaryIndexPartial'
- - descriptorIsNotBeingDropped-25.1($partial-predicate)
+ - descriptorIsNotBeingDropped-24.3($partial-predicate)
- $index[Type] = '*scpb.SecondaryIndex'
- joinOnIndexID($partial-predicate, $index, $table-id, $index-id)
- transient($partial-predicate-Target, $index-Target)
@@ -8137,7 +8109,7 @@ deprules
to: index-Node
query:
- $partial-predicate[Type] = '*scpb.SecondaryIndexPartial'
- - descriptorIsNotBeingDropped-25.1($partial-predicate)
+ - descriptorIsNotBeingDropped-24.3($partial-predicate)
- $index[Type] = '*scpb.SecondaryIndex'
- joinOnIndexID($partial-predicate, $index, $table-id, $index-id)
- $partial-predicate-Target[TargetStatus] = TRANSIENT_ABSENT
@@ -8152,7 +8124,7 @@ deprules
to: index-Node
query:
- $partial-predicate[Type] = '*scpb.SecondaryIndexPartial'
- - descriptorIsNotBeingDropped-25.1($partial-predicate)
+ - descriptorIsNotBeingDropped-24.3($partial-predicate)
- $index[Type] = '*scpb.SecondaryIndex'
- joinOnIndexID($partial-predicate, $index, $table-id, $index-id)
- $partial-predicate-Target[TargetStatus] = ABSENT
@@ -8467,7 +8439,7 @@ deprules
- $secondary-partial-index[Type] = '*scpb.SecondaryIndex'
- $column[Type] = '*scpb.Column'
- joinOnDescID($secondary-partial-index, $column, $table-id)
- - descriptorIsNotBeingDropped-25.1($secondary-partial-index)
+ - descriptorIsNotBeingDropped-24.3($secondary-partial-index)
- secondaryIndexReferencesColumn(*scpb.SecondaryIndex, *scpb.Column)($secondary-partial-index, $column)
- toAbsent($secondary-partial-index-Target, $column-Target)
- $secondary-partial-index-Node[CurrentStatus] = DELETE_ONLY
@@ -8482,7 +8454,7 @@ deprules
- $secondary-partial-index[Type] = '*scpb.SecondaryIndex'
- $column[Type] = '*scpb.Column'
- joinOnDescID($secondary-partial-index, $column, $table-id)
- - descriptorIsNotBeingDropped-25.1($secondary-partial-index)
+ - descriptorIsNotBeingDropped-24.3($secondary-partial-index)
- secondaryIndexReferencesColumn(*scpb.SecondaryIndex, *scpb.Column)($secondary-partial-index, $column)
- transient($secondary-partial-index-Target, $column-Target)
- $secondary-partial-index-Node[CurrentStatus] = TRANSIENT_DELETE_ONLY
@@ -8497,7 +8469,7 @@ deprules
- $secondary-partial-index[Type] = '*scpb.SecondaryIndex'
- $column[Type] = '*scpb.Column'
- joinOnDescID($secondary-partial-index, $column, $table-id)
- - descriptorIsNotBeingDropped-25.1($secondary-partial-index)
+ - descriptorIsNotBeingDropped-24.3($secondary-partial-index)
- secondaryIndexReferencesColumn(*scpb.SecondaryIndex, *scpb.Column)($secondary-partial-index, $column)
- $secondary-partial-index-Target[TargetStatus] = TRANSIENT_ABSENT
- $secondary-partial-index-Node[CurrentStatus] = TRANSIENT_DELETE_ONLY
@@ -8513,7 +8485,7 @@ deprules
- $secondary-partial-index[Type] = '*scpb.SecondaryIndex'
- $column[Type] = '*scpb.Column'
- joinOnDescID($secondary-partial-index, $column, $table-id)
- - descriptorIsNotBeingDropped-25.1($secondary-partial-index)
+ - descriptorIsNotBeingDropped-24.3($secondary-partial-index)
- secondaryIndexReferencesColumn(*scpb.SecondaryIndex, *scpb.Column)($secondary-partial-index, $column)
- $secondary-partial-index-Target[TargetStatus] = ABSENT
- $secondary-partial-index-Node[CurrentStatus] = DELETE_ONLY
@@ -8529,7 +8501,7 @@ deprules
- $secondary-partial-index[Type] = '*scpb.SecondaryIndexPartial'
- $column[Type] = '*scpb.Column'
- joinOnDescID($secondary-partial-index, $column, $table-id)
- - descriptorIsNotBeingDropped-25.1($secondary-partial-index)
+ - descriptorIsNotBeingDropped-24.3($secondary-partial-index)
- secondaryIndexReferencesColumn(*scpb.SecondaryIndexPartial, *scpb.Column)($secondary-partial-index, $column)
- toAbsent($secondary-partial-index-Target, $column-Target)
- $secondary-partial-index-Node[CurrentStatus] = ABSENT
@@ -8544,7 +8516,7 @@ deprules
- $secondary-partial-index[Type] = '*scpb.SecondaryIndexPartial'
- $column[Type] = '*scpb.Column'
- joinOnDescID($secondary-partial-index, $column, $table-id)
- - descriptorIsNotBeingDropped-25.1($secondary-partial-index)
+ - descriptorIsNotBeingDropped-24.3($secondary-partial-index)
- secondaryIndexReferencesColumn(*scpb.SecondaryIndexPartial, *scpb.Column)($secondary-partial-index, $column)
- transient($secondary-partial-index-Target, $column-Target)
- $secondary-partial-index-Node[CurrentStatus] = TRANSIENT_ABSENT
@@ -8559,7 +8531,7 @@ deprules
- $secondary-partial-index[Type] = '*scpb.SecondaryIndexPartial'
- $column[Type] = '*scpb.Column'
- joinOnDescID($secondary-partial-index, $column, $table-id)
- - descriptorIsNotBeingDropped-25.1($secondary-partial-index)
+ - descriptorIsNotBeingDropped-24.3($secondary-partial-index)
- secondaryIndexReferencesColumn(*scpb.SecondaryIndexPartial, *scpb.Column)($secondary-partial-index, $column)
- $secondary-partial-index-Target[TargetStatus] = TRANSIENT_ABSENT
- $secondary-partial-index-Node[CurrentStatus] = TRANSIENT_ABSENT
@@ -8575,7 +8547,7 @@ deprules
- $secondary-partial-index[Type] = '*scpb.SecondaryIndexPartial'
- $column[Type] = '*scpb.Column'
- joinOnDescID($secondary-partial-index, $column, $table-id)
- - descriptorIsNotBeingDropped-25.1($secondary-partial-index)
+ - descriptorIsNotBeingDropped-24.3($secondary-partial-index)
- secondaryIndexReferencesColumn(*scpb.SecondaryIndexPartial, *scpb.Column)($secondary-partial-index, $column)
- $secondary-partial-index-Target[TargetStatus] = ABSENT
- $secondary-partial-index-Node[CurrentStatus] = ABSENT
@@ -8649,7 +8621,7 @@ deprules
- toAbsent($index-Target, $column-Target)
- $index-Node[CurrentStatus] = VALIDATED
- $column-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-25.1($index-column)
+ - descriptorIsNotBeingDropped-24.3($index-column)
- isIndexKeyColumnKey(*scpb.IndexColumn)($index-column)
- joinTargetNode($index, $index-Target, $index-Node)
- joinTargetNode($column, $column-Target, $column-Node)
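
The only substantive change in these deprules is the version suffix on the predicate name: descriptorIsNotBeingDropped-25.1 becomes descriptorIsNotBeingDropped-24.3. The suffix appears to encode the rules version key under which a forked rules package registers its filters. A minimal sketch, assuming a per-release registry; the identifiers below (predicateRegistry, RegisterFilter) are hypothetical, not the real scplan API.

package main

import "fmt"

// predicateRegistry is a toy stand-in for a per-release rules registry.
type predicateRegistry struct {
	version string
	names   []string
}

// RegisterFilter records a filter under this registry's version, producing
// names like "descriptorIsNotBeingDropped-24.3" in the rendered deprules.
func (r *predicateRegistry) RegisterFilter(base string) string {
	name := fmt.Sprintf("%s-%s", base, r.version)
	r.names = append(r.names, name)
	return name
}

func main() {
	current := &predicateRegistry{version: "24.3"}
	fmt.Println(current.RegisterFilter("descriptorIsNotBeingDropped"))
}
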
diff --git a/pkg/sql/schemachanger/scplan/internal/rules/release_24_3/BUILD.bazel b/pkg/sql/schemachanger/scplan/internal/rules/release_24_3/BUILD.bazel
deleted file mode 100644
index 8386025749c5..000000000000
--- a/pkg/sql/schemachanger/scplan/internal/rules/release_24_3/BUILD.bazel
+++ /dev/null
@@ -1,61 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
-
-go_library(
- name = "release_24_3",
- srcs = [
- "dep_add_column.go",
- "dep_add_constraint.go",
- "dep_add_index.go",
- "dep_add_index_and_column.go",
- "dep_add_index_and_constraint.go",
- "dep_alter_column_type.go",
- "dep_create.go",
- "dep_create_function.go",
- "dep_drop_column.go",
- "dep_drop_constraint.go",
- "dep_drop_index.go",
- "dep_drop_index_and_column.go",
- "dep_drop_object.go",
- "dep_garbage_collection.go",
- "dep_swap_index.go",
- "dep_two_version.go",
- "helpers.go",
- "registry.go",
- ],
- importpath = "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/internal/rules/release_24_3",
- visibility = ["//pkg/sql/schemachanger/scplan:__subpackages__"],
- deps = [
- "//pkg/clusterversion",
- "//pkg/sql/schemachanger/rel",
- "//pkg/sql/schemachanger/scpb",
- "//pkg/sql/schemachanger/scplan/internal/opgen",
- "//pkg/sql/schemachanger/scplan/internal/rules",
- "//pkg/sql/schemachanger/scplan/internal/scgraph",
- "//pkg/sql/schemachanger/screl",
- "//pkg/sql/sem/catid",
- "@com_github_cockroachdb_errors//:errors",
- ],
-)
-
-go_test(
- name = "release_24_3_test",
- srcs = [
- "assertions_test.go",
- "rules_test.go",
- ],
- data = glob(["testdata/**"]),
- embed = [":release_24_3"],
- deps = [
- "//pkg/sql/catalog/catpb",
- "//pkg/sql/schemachanger/rel",
- "//pkg/sql/schemachanger/scpb",
- "//pkg/sql/schemachanger/scplan/internal/opgen",
- "//pkg/sql/schemachanger/screl",
- "//pkg/sql/types",
- "//pkg/testutils/datapathutils",
- "@com_github_cockroachdb_datadriven//:datadriven",
- "@com_github_cockroachdb_errors//:errors",
- "@com_github_stretchr_testify//require",
- "@in_gopkg_yaml_v3//:yaml_v3",
- ],
-)
diff --git a/pkg/sql/schemachanger/scplan/internal/rules/release_24_3/assertions_test.go b/pkg/sql/schemachanger/scplan/internal/rules/release_24_3/assertions_test.go
deleted file mode 100644
index f585f0b7eb0c..000000000000
--- a/pkg/sql/schemachanger/scplan/internal/rules/release_24_3/assertions_test.go
+++ /dev/null
@@ -1,207 +0,0 @@
-// Copyright 2022 The Cockroach Authors.
-//
-// Use of this software is governed by the CockroachDB Software License
-// included in the /LICENSE file.
-
-package release_24_3
-
-import (
- "reflect"
- "runtime"
- "strings"
- "testing"
-
- "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb"
- "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scpb"
- "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/internal/opgen"
- "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/screl"
- "github.com/cockroachdb/cockroach/pkg/sql/types"
- "github.com/cockroachdb/errors"
-)
-
-// TestRuleAssertions checks that important helper functions uphold certain
-// properties that the rule definitions rely on.
-func TestRuleAssertions(t *testing.T) {
- for _, fn := range []func(e scpb.Element) error{
- checkSimpleDependentsReferenceDescID,
- checkToAbsentCategories,
- checkIsWithTypeT,
- checkIsWithExpression,
- checkIsColumnDependent,
- checkIsIndexDependent,
- checkIsConstraintDependent,
- checkConstraintPartitions,
- } {
- var fni interface{} = fn
- fullName := runtime.FuncForPC(reflect.ValueOf(fni).Pointer()).Name()
- nameParts := strings.Split(fullName, "rules.")
- shortName := nameParts[len(nameParts)-1]
- t.Run(shortName, func(t *testing.T) {
- _ = scpb.ForEachElementType(func(e scpb.Element) error {
- e = nonNilElement(e)
- if err := fn(e); err != nil {
- t.Errorf("%T: %+v", e, err)
- }
- return nil
- })
- })
- }
-}
-
-func nonNilElement(element scpb.Element) scpb.Element {
- return reflect.New(reflect.ValueOf(element).Type().Elem()).Interface().(scpb.Element)
-}
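
The reflection trick in nonNilElement above deserves a note: scpb elements are pointer types, so reflect.New on the pointed-to struct type manufactures a usable non-nil instance of the same concrete type. A self-contained sketch with a toy Element type (not the real scpb one):

package main

import (
	"fmt"
	"reflect"
)

type Element interface{ element() }

type Column struct{ ColumnID int }

func (*Column) element() {}

// nonNil mirrors nonNilElement: given an Element holding a (possibly nil)
// *Column, reflect.New on the pointed-to struct type yields a non-nil
// pointer of the same concrete type.
func nonNil(e Element) Element {
	return reflect.New(reflect.TypeOf(e).Elem()).Interface().(Element)
}

func main() {
	var nilCol *Column // typed nil stored in the interface
	e := nonNil(nilCol)
	fmt.Printf("%T, non-nil: %v\n", e, e != nil) // *main.Column, non-nil: true
}
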
-
-// Assert that only simple dependents (non-descriptor, non-index, non-column)
-// and data elements have screl.ReferencedDescID attributes.
-// One exception is the foreign key constraint, which is neither a simple
-// dependent nor a data element but nonetheless has a screl.ReferencedDescID
-// attribute.
-func checkSimpleDependentsReferenceDescID(e scpb.Element) error {
- if isSimpleDependent(e) || isData(e) {
- return nil
- }
- if _, ok := e.(*scpb.ForeignKeyConstraint); ok {
- return nil
- }
- if _, err := screl.Schema.GetAttribute(screl.ReferencedDescID, e); err == nil {
- return errors.New("unexpected screl.ReferencedDescID attr")
- }
- return nil
-}
-
-// Assert that elements can be grouped into three categories when transitioning
-// from PUBLIC to ABSENT:
-// - go via DROPPED iff they're descriptor or data elements;
-// - go via a non-read status iff they're indexes or columns, which are
-// subject to the two-version invariant;
-// - go directly to ABSENT in all other cases.
-func checkToAbsentCategories(e scpb.Element) error {
- s0 := opgen.InitialStatus(e, scpb.Status_ABSENT)
- s1 := opgen.NextStatus(e, scpb.Status_ABSENT, s0)
- switch s1 {
- case scpb.Status_DROPPED:
- if isDescriptor(e) || isData(e) {
- return nil
- }
- case scpb.Status_VALIDATED, scpb.Status_WRITE_ONLY, scpb.Status_DELETE_ONLY:
- if isSubjectTo2VersionInvariant(e) {
- return nil
- }
- case scpb.Status_ABSENT:
- if isSimpleDependent(e) {
- return nil
- }
- }
- return errors.Newf("unexpected transition %s -> %s in direction ABSENT", s0, s1)
-}
-
-// Assert that isWithTypeT covers all elements with embedded TypeTs.
-func checkIsWithTypeT(e scpb.Element) error {
- return screl.WalkTypes(e, func(t *types.T) error {
- if isWithTypeT(e) {
- return nil
- }
- return errors.New("should verify isWithTypeT but doesn't")
- })
-}
-
-// Assert that isWithExpression covers all elements with embedded
-// expressions.
-func checkIsWithExpression(e scpb.Element) error {
- return screl.WalkExpressions(e, func(t *catpb.Expression) error {
- switch e.(type) {
- // Ignore elements which have catpb.Expression fields but which don't
- // have them within an scpb.Expression for valid reasons.
- case *scpb.RowLevelTTL:
- return nil
- }
- if isWithExpression(e) {
- return nil
- }
- return errors.New("should verify isWithExpression but doesn't")
- })
-}
-
-// Assert that isColumnDependent covers all dependent elements of a column
-// element.
-func checkIsColumnDependent(e scpb.Element) error {
- // Exclude columns themselves.
- if isColumn(e) {
- return nil
- }
- // A column dependent should have a ColumnID attribute.
- _, err := screl.Schema.GetAttribute(screl.ColumnID, e)
- if isColumnDependent(e) {
- if err != nil {
- return errors.New("verifies isColumnDependent but doesn't have ColumnID attr")
- }
- } else if err == nil {
- return errors.New("has ColumnID attr but doesn't verify isColumnDependent")
- }
- return nil
-}
-
-// Assert that isIndexDependent covers all dependent elements of an index
-// element.
-func checkIsIndexDependent(e scpb.Element) error {
- // Exclude indexes themselves and their data.
- if isIndex(e) || isData(e) || isNonIndexBackedConstraint(e) {
- return nil
- }
- // An index dependent should have an IndexID attribute.
- _, err := screl.Schema.GetAttribute(screl.IndexID, e)
- if isIndexDependent(e) {
- if err != nil {
- return errors.New("verifies isIndexDependent but doesn't have IndexID attr")
- }
- } else if err == nil {
- return errors.New("has IndexID attr but doesn't verify isIndexDependent")
- }
- return nil
-}
-
-// Assert that isConstraintDependent covers all dependent elements of a
-// constraint element.
-func checkIsConstraintDependent(e scpb.Element) error {
- // Exclude constraints themselves.
- if isConstraint(e) {
- return nil
- }
- // A constraint dependent should have a ConstraintID attribute.
- _, err := screl.Schema.GetAttribute(screl.ConstraintID, e)
- if isConstraintDependent(e) {
- if err != nil {
- return errors.New("verifies isConstraintDependent but doesn't have ConstraintID attr")
- }
- } else if err == nil {
- return errors.New("has ConstraintID attr but doesn't verify isConstraintDependent")
- }
- return nil
-}
-
-// Assert the following partitions about constraints:
-// 1. An element `e` with ConstraintID attr is either a constraint
-// or a constraint dependent.
-// 2. A constraint is either index-backed or non-index-backed.
-//
-// TODO (xiang): Add a test for the cross-descriptor partition. We cannot
-// have one until we add a referenced.*ID attr for the
-// UniqueWithoutIndex[NotValid] element, which is required to support
-// partial unique-without-index constraints with predicates that reference
-// other descriptors.
-func checkConstraintPartitions(e scpb.Element) error {
- _, err := screl.Schema.GetAttribute(screl.ConstraintID, e)
- if err != nil {
- return nil //nolint:returnerrcheck
- }
- if !isConstraint(e) && !isConstraintDependent(e) {
- return errors.New("has ConstraintID attr but is not a constraint nor a constraint dependent")
- }
- if isConstraintDependent(e) {
- return nil
- }
- if !isNonIndexBackedConstraint(e) && !isIndex(e) {
- return errors.New("verifies isConstraint but does not verify isNonIndexBackedConstraint nor isIndex")
- }
- return nil
-}
diff --git a/pkg/sql/schemachanger/scplan/internal/rules/release_24_3/dep_add_column.go b/pkg/sql/schemachanger/scplan/internal/rules/release_24_3/dep_add_column.go
deleted file mode 100644
index cf2715ad970e..000000000000
--- a/pkg/sql/schemachanger/scplan/internal/rules/release_24_3/dep_add_column.go
+++ /dev/null
@@ -1,141 +0,0 @@
-// Copyright 2022 The Cockroach Authors.
-//
-// Use of this software is governed by the CockroachDB Software License
-// included in the /LICENSE file.
-
-package release_24_3
-
-import (
- "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/rel"
- "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scpb"
- . "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/internal/rules"
- "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/internal/scgraph"
- "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/screl"
-)
-
-// These rules ensure that column-dependent elements, like a column's name, its
-// DEFAULT expression, etc. appear once the column reaches a suitable state.
-func init() {
-
- registerDepRule(
- "column existence precedes column dependents",
- scgraph.Precedence,
- "column", "dependent",
- func(from, to NodeVars) rel.Clauses {
- return rel.Clauses{
- from.Type((*scpb.Column)(nil)),
- to.TypeFilter(rulesVersionKey, isColumnDependent),
- JoinOnColumnID(from, to, "table-id", "col-id"),
- StatusesToPublicOrTransient(from, scpb.Status_DELETE_ONLY, to, scpb.Status_PUBLIC),
- }
- },
- )
-
- registerDepRule(
- "column dependents exist before column becomes public",
- scgraph.Precedence,
- "dependent", "column",
- func(from, to NodeVars) rel.Clauses {
- return rel.Clauses{
- from.TypeFilter(rulesVersionKey, isColumnDependent),
- to.Type((*scpb.Column)(nil)),
- JoinOnColumnID(from, to, "table-id", "col-id"),
- StatusesToPublicOrTransient(from, scpb.Status_PUBLIC, to, scpb.Status_PUBLIC),
- }
- },
- )
-}
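
Each registerDepRule call above conceptually contributes a precedence edge between two (element, status) nodes; the planner must then emit stages in an order that respects every edge. A toy model of that idea, assuming nothing about the real scgraph API:

package main

import "fmt"

// edge says "from must reach its status no later than to". The real planner
// solves this over a graph of (element, target, status) nodes; here we just
// check a proposed stage order against the edges.
type edge struct{ from, to string }

func satisfies(order []string, edges []edge) bool {
	pos := make(map[string]int, len(order))
	for i, n := range order {
		pos[n] = i
	}
	for _, e := range edges {
		if pos[e.from] > pos[e.to] {
			return false
		}
	}
	return true
}

func main() {
	edges := []edge{
		{"column@DELETE_ONLY", "columnName@PUBLIC"}, // existence precedes dependents
		{"columnName@PUBLIC", "column@PUBLIC"},      // dependents exist before public
	}
	order := []string{"column@DELETE_ONLY", "columnName@PUBLIC", "column@PUBLIC"}
	fmt.Println(satisfies(order, edges)) // true
}
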
-
-// Special cases of the above.
-func init() {
- registerDepRule(
- "column name and type set right after column existence",
- scgraph.SameStagePrecedence,
- "column", "column-name-or-type",
- func(from, to NodeVars) rel.Clauses {
- return rel.Clauses{
- from.Type((*scpb.Column)(nil)),
- to.Type(
- (*scpb.ColumnName)(nil),
- (*scpb.ColumnType)(nil),
- ),
- StatusesToPublicOrTransient(from, scpb.Status_DELETE_ONLY, to, scpb.Status_PUBLIC),
- JoinOnColumnID(from, to, "table-id", "col-id"),
- }
- },
- )
-
- registerDepRule(
- "DEFAULT or ON UPDATE existence precedes writes to column",
- scgraph.Precedence,
- "expr", "column",
- func(from, to NodeVars) rel.Clauses {
- return rel.Clauses{
- from.Type(
- (*scpb.ColumnDefaultExpression)(nil),
- (*scpb.ColumnOnUpdateExpression)(nil),
- ),
- to.Type((*scpb.Column)(nil)),
- JoinOnColumnID(from, to, "table-id", "col-id"),
- StatusesToPublicOrTransient(from, scpb.Status_PUBLIC, to, scpb.Status_WRITE_ONLY),
- }
- },
- )
-
- // Column becomes writable in the same stage as column constraint is enforced.
- //
- // This rule exists to prevent the case that the constraint becomes enforced
- // (which means writes need to honor it) when the column itself is still
- // in DELETE_ONLY and thus not visible to writes.
- //
- // N.B. It's essentially the same rule as "column constraint removed right
- // before column reaches write only" but on the adding path.
- // N.B. SameStage is enough; which transition happens first won't matter.
- registerDepRule(
- "column writable right before column constraint is enforced.",
- scgraph.SameStagePrecedence,
- "column", "column-constraint",
- func(from, to NodeVars) rel.Clauses {
- return rel.Clauses{
- from.Type((*scpb.Column)(nil)),
- to.Type((*scpb.ColumnNotNull)(nil)),
- JoinOnColumnID(from, to, "table-id", "col-id"),
- StatusesToPublicOrTransient(from, scpb.Status_WRITE_ONLY, to, scpb.Status_WRITE_ONLY),
- }
- },
- )
-
-}
-
-// This rule ensures that columns depend on each other in increasing order.
-func init() {
- registerDepRule(
- "ensure columns are in increasing order",
- scgraph.Precedence,
- "later-column", "earlier-column",
- func(from, to NodeVars) rel.Clauses {
- status := rel.Var("status")
- return rel.Clauses{
- from.Type((*scpb.Column)(nil)),
- // Join first on the target and node to only explore all columns
- // which are being added as opposed to all columns. If we joined
- // first on the columns, we'd be filtering the cross product of
- // table columns. If a relation has a lot of columns, this can hurt.
- // It's less likely that we have a very large number of columns which
- // are being added. We'll want to do something else here when we start
- // creating tables and all the columns are being added.
- //
- // The "right" answer is to push ordering predicates into rel; that also
- // implies maintaining sorted data structures.
- from.JoinTargetNode(),
- to.Type((*scpb.Column)(nil)),
- JoinOnDescID(from, to, "table-id"),
- ToPublicOrTransient(from, to),
- status.In(scpb.Status_WRITE_ONLY, scpb.Status_PUBLIC),
- status.Entities(screl.CurrentStatus, from.Node, to.Node),
- FilterElements("SmallerColumnIDFirst", from, to, func(from, to *scpb.Column) bool {
- return from.ColumnID < to.ColumnID
- }),
- }
- })
-}
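
The join-ordering concern in the comment above can be made concrete: filtering the cross product of all table columns inspects quadratically more pairs than first narrowing to the columns being added. A rough illustration with made-up numbers:

package main

import "fmt"

// Illustrates why the rule joins on targets first: a wide relation has many
// columns, but a given schema change usually adds only a few. Numbers are
// illustrative only.
func main() {
	const tableColumns = 300 // columns on a wide relation
	const addedColumns = 3   // columns in the current schema change

	crossProductPairs := tableColumns * tableColumns
	targetFirstPairs := addedColumns * addedColumns

	fmt.Printf("all-columns join: %d pairs\n", crossProductPairs)  // 90000
	fmt.Printf("targets-first join: %d pairs\n", targetFirstPairs) // 9
}
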
diff --git a/pkg/sql/schemachanger/scplan/internal/rules/release_24_3/dep_add_constraint.go b/pkg/sql/schemachanger/scplan/internal/rules/release_24_3/dep_add_constraint.go
deleted file mode 100644
index 57f35a0e0d83..000000000000
--- a/pkg/sql/schemachanger/scplan/internal/rules/release_24_3/dep_add_constraint.go
+++ /dev/null
@@ -1,79 +0,0 @@
-// Copyright 2022 The Cockroach Authors.
-//
-// Use of this software is governed by the CockroachDB Software License
-// included in the /LICENSE file.
-
-package release_24_3
-
-import (
- "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/rel"
- "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scpb"
- . "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/internal/rules"
- "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/internal/scgraph"
- "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/screl"
-)
-
-// These rules ensure that constraint-dependent elements, like a constraint's
-// name, etc. appear once the constraint reaches a suitable state.
-func init() {
- registerDepRule(
- "constraint dependent public right before complex constraint",
- scgraph.SameStagePrecedence,
- "dependent", "complex-constraint",
- func(from, to NodeVars) rel.Clauses {
- return rel.Clauses{
- from.TypeFilter(rulesVersionKey, isConstraintDependent, Not(isConstraintWithoutIndexName)),
- to.TypeFilter(rulesVersionKey, isNonIndexBackedConstraint, isSubjectTo2VersionInvariant),
- JoinOnConstraintID(from, to, "table-id", "constraint-id"),
- StatusesToPublicOrTransient(from, scpb.Status_PUBLIC, to, scpb.Status_PUBLIC),
- }
- },
- )
-
- registerDepRule(
- "simple constraint public right before its dependents",
- scgraph.SameStagePrecedence,
- "simple-constraint", "dependent",
- func(from, to NodeVars) rel.Clauses {
- return rel.Clauses{
- from.TypeFilter(rulesVersionKey, isNonIndexBackedConstraint, Not(isNonIndexBackedCrossDescriptorConstraint)),
- to.TypeFilter(rulesVersionKey, isConstraintDependent),
- JoinOnConstraintID(from, to, "table-id", "constraint-id"),
- StatusesToPublicOrTransient(from, scpb.Status_PUBLIC, to, scpb.Status_PUBLIC),
- }
- },
- )
-
- // The constraint name should be assigned right before the constraint becomes
- // visible; otherwise error messages would not contain the correct name.
- registerDepRule(
- "simple constraint visible before name",
- scgraph.Precedence,
- "simple-constraint", "constraint-name",
- func(from, to NodeVars) rel.Clauses {
- return rel.Clauses{
- from.TypeFilter(rulesVersionKey, isNonIndexBackedConstraint),
- to.TypeFilter(rulesVersionKey, isConstraintWithoutIndexName),
- JoinOnConstraintID(from, to, "table-id", "constraint-id"),
- StatusesToPublicOrTransient(from, scpb.Status_WRITE_ONLY, to, scpb.Status_PUBLIC),
- }
- },
- )
-
- registerDepRule(
- "column public before non-index-backed constraint (including hash-sharded) is created",
- scgraph.Precedence,
- "column", "constraint",
- func(from, to NodeVars) rel.Clauses {
- colID := rel.Var("columnID")
- return rel.Clauses{
- from.Type((*scpb.Column)(nil)),
- to.TypeFilter(rulesVersionKey, isNonIndexBackedConstraint),
- from.El.AttrEqVar(screl.ColumnID, colID),
- to.ReferencedColumnIDsContains(colID),
- JoinOnDescID(from, to, "table-id"),
- StatusesToPublicOrTransient(from, scpb.Status_PUBLIC, to, scpb.Status_WRITE_ONLY),
- }
- },
- )
-}
diff --git a/pkg/sql/schemachanger/scplan/internal/rules/release_24_3/dep_add_index.go b/pkg/sql/schemachanger/scplan/internal/rules/release_24_3/dep_add_index.go
deleted file mode 100644
index 9e8660c19c03..000000000000
--- a/pkg/sql/schemachanger/scplan/internal/rules/release_24_3/dep_add_index.go
+++ /dev/null
@@ -1,240 +0,0 @@
-// Copyright 2022 The Cockroach Authors.
-//
-// Use of this software is governed by the CockroachDB Software License
-// included in the /LICENSE file.
-
-package release_24_3
-
-import (
- "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/rel"
- "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scpb"
- . "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/internal/rules"
- "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/internal/scgraph"
- "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/screl"
-)
-
-// These rules ensure that index-dependent elements, like an index's name, its
-// partitioning, etc. appear once the index reaches a suitable state.
-func init() {
-
- registerDepRule(
- "index existence precedes index dependents",
- scgraph.Precedence,
- "index", "dependent",
- func(from, to NodeVars) rel.Clauses {
- return rel.Clauses{
- from.Type(
- (*scpb.PrimaryIndex)(nil),
- (*scpb.SecondaryIndex)(nil),
- ),
- to.TypeFilter(rulesVersionKey, isIndexDependent),
- JoinOnIndexID(from, to, "table-id", "index-id"),
- StatusesToPublicOrTransient(from, scpb.Status_BACKFILL_ONLY, to, scpb.Status_PUBLIC),
- }
- },
- )
-
- registerDepRule(
- "temp index existence precedes index dependents",
- scgraph.Precedence,
- "index", "dependent",
- func(from, to NodeVars) rel.Clauses {
- return rel.Clauses{
- from.Type((*scpb.TemporaryIndex)(nil)),
- to.TypeFilter(rulesVersionKey, isIndexDependent),
- JoinOnIndexID(from, to, "table-id", "index-id"),
- StatusesToPublicOrTransient(from, scpb.Status_DELETE_ONLY, to, scpb.Status_PUBLIC),
- }
- },
- )
-
- registerDepRule(
- "index dependents exist before index becomes public",
- scgraph.Precedence,
- "dependent", "index",
- func(from, to NodeVars) rel.Clauses {
- return rel.Clauses{
- from.TypeFilter(rulesVersionKey, isIndexDependent),
- to.TypeFilter(rulesVersionKey, isIndex),
- JoinOnIndexID(from, to, "table-id", "index-id"),
- StatusesToPublicOrTransient(from, scpb.Status_PUBLIC, to, scpb.Status_PUBLIC),
- }
- },
- )
-}
-
-// Special cases of the above.
-func init() {
-
- registerDepRule(
- "primary index named right before index becomes public",
- scgraph.SameStagePrecedence,
- "index-name", "index",
- func(from, to NodeVars) rel.Clauses {
- return rel.Clauses{
- from.Type((*scpb.IndexName)(nil)),
- to.Type(
- (*scpb.PrimaryIndex)(nil),
- ),
- JoinOnIndexID(from, to, "table-id", "index-id"),
- StatusesToPublicOrTransient(from, scpb.Status_PUBLIC, to, scpb.Status_PUBLIC),
- }
- },
- )
- registerDepRule(
- "secondary index named before public (with index swap)",
- scgraph.Precedence,
- "index", "index-name",
- func(from, to NodeVars) rel.Clauses {
- return rel.Clauses{
- to.Type((*scpb.IndexName)(nil)),
- from.Type(
- (*scpb.SecondaryIndex)(nil),
- ),
- JoinOnIndexID(from, to, "table-id", "index-id"),
- StatusesToPublicOrTransient(from, scpb.Status_VALIDATED, to, scpb.Status_PUBLIC),
- rel.And(IsPotentialSecondaryIndexSwap("index-id", "table-id")...),
- }
- },
- )
-
- registerDepRule(
- "secondary index named before validation (without index swap)",
- scgraph.Precedence,
- "index-name", "index",
- func(from, to NodeVars) rel.Clauses {
- return rel.Clauses{
- from.Type((*scpb.IndexName)(nil)),
- to.Type(
- (*scpb.SecondaryIndex)(nil),
- ),
- JoinOnIndexID(from, to, "table-id", "index-id"),
- IsNotPotentialSecondaryIndexSwap("table-id", "index-id"),
- StatusesToPublicOrTransient(from, scpb.Status_PUBLIC, to, scpb.Status_VALIDATED),
- }
- },
- )
-}
-
-// This rule ensures that primary indexes and their corresponding temporary
-// indexes appear in an appropriate order to correctly support index backfilling.
-func init() {
-
- // An offline-backfilled index can begin backfilling once the corresponding
- // temporary index exists in WRITE_ONLY.
- registerDepRule(
- "temp index is WRITE_ONLY before backfill",
- scgraph.Precedence,
- "temp", "index",
- func(from, to NodeVars) rel.Clauses {
- return rel.Clauses{
- from.Type((*scpb.TemporaryIndex)(nil)),
- to.Type((*scpb.PrimaryIndex)(nil), (*scpb.SecondaryIndex)(nil)),
- JoinOnDescID(from, to, "table-id"),
- JoinOn(
- from, screl.IndexID,
- to, screl.TemporaryIndexID,
- "temp-index-id",
- ),
- from.TargetStatus(scpb.Transient),
- to.TargetStatus(scpb.ToPublic, scpb.Transient),
- from.CurrentStatus(scpb.Status_WRITE_ONLY),
- to.CurrentStatus(scpb.Status_BACKFILLED),
- }
- },
- )
-
- // The following two rules together ensure that a temporary index is dropped
- // after its master index has merged the temporary index's data (MERGED) and
- // before the master index advances to the next status (WRITE_ONLY).
-
- // Temporary index starts to disappear after its master index has merged
- // this temporary index's data.
- registerDepRule(
- "index is MERGED before its temp index starts to disappear",
- scgraph.Precedence,
- "index", "temp",
- func(from, to NodeVars) rel.Clauses {
- return rel.Clauses{
- from.Type((*scpb.PrimaryIndex)(nil), (*scpb.SecondaryIndex)(nil)),
- to.Type((*scpb.TemporaryIndex)(nil)),
- JoinOnDescID(from, to, "table-id"),
- JoinOn(
- from, screl.TemporaryIndexID,
- to, screl.IndexID,
- "temp-index-id",
- ),
- from.TargetStatus(scpb.ToPublic, scpb.Transient),
- from.CurrentStatus(scpb.Status_MERGED),
- to.TargetStatus(scpb.Transient),
- to.CurrentStatus(scpb.Status_TRANSIENT_DELETE_ONLY),
- }
- },
- )
-
- // The temporary index disappears before its master index reaches WRITE_ONLY.
- registerDepRule(
- "temp index disappeared before its master index reaches WRITE_ONLY",
- scgraph.Precedence,
- "temp", "index",
- func(from, to NodeVars) rel.Clauses {
- return rel.Clauses{
- from.Type((*scpb.TemporaryIndex)(nil)),
- to.Type((*scpb.PrimaryIndex)(nil), (*scpb.SecondaryIndex)(nil)),
- JoinOnDescID(from, to, "table-id"),
- JoinOn(
- from, screl.IndexID,
- to, screl.TemporaryIndexID,
- "temp-index-id",
- ),
- from.TargetStatus(scpb.Transient),
- from.CurrentStatus(scpb.Status_TRANSIENT_DELETE_ONLY),
- to.TargetStatus(scpb.ToPublic, scpb.Transient),
- to.CurrentStatus(scpb.Status_WRITE_ONLY),
- }
- },
- )
-}
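
Taken together, the three rules above pin the temporary index's lifecycle to its master index: writes start before the backfill, and the temporary index only disappears between the master's MERGED and WRITE_ONLY statuses. A toy checker for one valid interleaving; the status strings follow the rules above, while the checker itself is hypothetical:

package main

import "fmt"

// respectsTempIndexRules verifies a trace against the three orderings above:
// temp@WRITE_ONLY before index@BACKFILLED, index@MERGED before
// temp@TRANSIENT_DELETE_ONLY, and temp@TRANSIENT_DELETE_ONLY before
// index@WRITE_ONLY.
func respectsTempIndexRules(trace []string) bool {
	pos := map[string]int{}
	for i, ev := range trace {
		pos[ev] = i
	}
	before := [][2]string{
		{"temp@WRITE_ONLY", "index@BACKFILLED"},
		{"index@MERGED", "temp@TRANSIENT_DELETE_ONLY"},
		{"temp@TRANSIENT_DELETE_ONLY", "index@WRITE_ONLY"},
	}
	for _, p := range before {
		if pos[p[0]] > pos[p[1]] {
			return false
		}
	}
	return true
}

func main() {
	trace := []string{
		"temp@WRITE_ONLY", "index@BACKFILLED", "index@MERGED",
		"temp@TRANSIENT_DELETE_ONLY", "index@WRITE_ONLY",
	}
	fmt.Println(respectsTempIndexRules(trace)) // true
}
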
-
-// We want to say that all columns which are part of a secondary index need
-// to be in a primary index which is validated.
-// To do that, we want to find a secondary index whose source is a primary
-// index which is itself new.
-func init() {
-
- registerDepRule(
- "primary index with new columns should exist before secondary indexes",
- scgraph.Precedence,
- "primary-index", "secondary-index",
- func(from, to NodeVars) rel.Clauses {
- return rel.Clauses{
- from.Type((*scpb.PrimaryIndex)(nil)),
- to.Type((*scpb.SecondaryIndex)(nil)),
- JoinOnDescID(from, to, "table-id"),
- JoinOn(
- from, screl.IndexID,
- to, screl.SourceIndexID,
- "primary-index-id",
- ),
- StatusesToPublicOrTransient(from, scpb.Status_PUBLIC, to, scpb.Status_BACKFILL_ONLY),
- }
- })
-
- registerDepRule(
- "primary index with new columns should exist before temp indexes",
- scgraph.Precedence,
- "primary-index", "temp-index",
- func(from, to NodeVars) rel.Clauses {
- return rel.Clauses{
- from.Type((*scpb.PrimaryIndex)(nil)),
- to.Type((*scpb.TemporaryIndex)(nil)),
- JoinOnDescID(from, to, "table-id"),
- JoinOn(
- from, screl.IndexID,
- to, screl.SourceIndexID,
- "primary-index-id",
- ),
- StatusesToPublicOrTransient(from, scpb.Status_PUBLIC, to, scpb.Status_DELETE_ONLY),
- }
- })
-}
diff --git a/pkg/sql/schemachanger/scplan/internal/rules/release_24_3/dep_add_index_and_column.go b/pkg/sql/schemachanger/scplan/internal/rules/release_24_3/dep_add_index_and_column.go
deleted file mode 100644
index 37316b92aff7..000000000000
--- a/pkg/sql/schemachanger/scplan/internal/rules/release_24_3/dep_add_index_and_column.go
+++ /dev/null
@@ -1,120 +0,0 @@
-// Copyright 2022 The Cockroach Authors.
-//
-// Use of this software is governed by the CockroachDB Software License
-// included in the /LICENSE file.
-
-package release_24_3
-
-import (
- "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/rel"
- "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scpb"
- . "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/internal/rules"
- "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/internal/scgraph"
-)
-
-// These rules ensure that columns and the indexes containing those columns
-// come into existence in the correct order.
-func init() {
-
- // We need to make sure that no columns are added to the index after it
- // receives any data due to a backfill.
- registerDepRule("index-column added to index before index is backfilled",
- scgraph.Precedence,
- "index-column", "index",
- func(from, to NodeVars) rel.Clauses {
- return rel.Clauses{
- from.Type((*scpb.IndexColumn)(nil)),
- to.Type((*scpb.PrimaryIndex)(nil), (*scpb.SecondaryIndex)(nil)),
- JoinOnIndexID(from, to, "table-id", "index-id"),
- StatusesToPublicOrTransient(from, scpb.Status_PUBLIC, to, scpb.Status_BACKFILLED),
- }
- })
-
- // We need to make sure that no columns are added to the temp index after it
- // receives any writes.
- registerDepRule("index-column added to index before temp index receives writes",
- scgraph.Precedence,
- "index-column", "index",
- func(from, to NodeVars) rel.Clauses {
- return rel.Clauses{
- from.Type((*scpb.IndexColumn)(nil)),
- to.Type((*scpb.TemporaryIndex)(nil)),
- JoinOnIndexID(from, to, "table-id", "index-id"),
- StatusesTransient(from, scpb.Status_PUBLIC, to, scpb.Status_WRITE_ONLY),
- }
- })
-
- registerDepRule(
- "column existence precedes index existence",
- scgraph.Precedence,
- "column", "index",
- func(from, to NodeVars) rel.Clauses {
- ic := MkNodeVars("index-column")
- relationID, columnID := rel.Var("table-id"), rel.Var("column-id")
- return rel.Clauses{
- from.Type((*scpb.Column)(nil)),
- to.Type((*scpb.PrimaryIndex)(nil), (*scpb.SecondaryIndex)(nil)),
- JoinOnColumnID(from, ic, relationID, columnID),
- ColumnInIndex(ic, to, relationID, columnID, "index-id"),
- StatusesToPublicOrTransient(from, scpb.Status_DELETE_ONLY, to, scpb.Status_BACKFILL_ONLY),
- }
- },
- )
-
- registerDepRule(
- "column existence precedes temp index existence",
- scgraph.Precedence,
- "column", "index",
- func(from, to NodeVars) rel.Clauses {
- ic := MkNodeVars("index-column")
- relationID, columnID := rel.Var("table-id"), rel.Var("column-id")
- return rel.Clauses{
- from.Type((*scpb.Column)(nil)),
- to.Type((*scpb.TemporaryIndex)(nil)),
- JoinOnColumnID(ic, from, relationID, columnID),
- ColumnInIndex(ic, to, relationID, columnID, "index-id"),
- StatusesToPublicOrTransient(from, scpb.Status_DELETE_ONLY, to, scpb.Status_DELETE_ONLY),
- }
- },
- )
-
- // We need to ensure that the temporary index has all the relevant writes
- // to any columns it contains. We ensure elsewhere that any index which
- // will later be merged with the temporary index is not backfilled until
- // that temporary index is receiving writes. This rule ensures that those
- // write operations contain data for all columns.
- registerDepRule(
- "column is WRITE_ONLY before temporary index is WRITE_ONLY",
- scgraph.Precedence,
- "column", "index",
- func(from, to NodeVars) rel.Clauses {
- ic := MkNodeVars("index-column")
- relationID, columnID := rel.Var("table-id"), rel.Var("column-id")
- return rel.Clauses{
- from.El.Type((*scpb.Column)(nil)),
- to.El.Type((*scpb.TemporaryIndex)(nil)),
- JoinOnColumnID(ic, from, relationID, columnID),
- ColumnInIndex(ic, to, relationID, columnID, "index-id"),
- StatusesToPublicOrTransient(from, scpb.Status_WRITE_ONLY, to, scpb.Status_WRITE_ONLY),
- }
- },
- )
-
- registerDepRule(
- "swapped primary index public before column",
- scgraph.Precedence,
- "index", "column",
- func(from, to NodeVars) rel.Clauses {
- ic := MkNodeVars("index-column")
- relationID, columnID := rel.Var("table-id"), rel.Var("column-id")
- return rel.Clauses{
- from.Type((*scpb.PrimaryIndex)(nil)),
- to.Type((*scpb.Column)(nil)),
- ColumnInSwappedInPrimaryIndex(ic, from, relationID, columnID, "index-id"),
- JoinOnColumnID(ic, to, relationID, columnID),
- StatusesToPublicOrTransient(from, scpb.Status_PUBLIC, to, scpb.Status_PUBLIC),
- }
- },
- )
-
-}
diff --git a/pkg/sql/schemachanger/scplan/internal/rules/release_24_3/dep_add_index_and_constraint.go b/pkg/sql/schemachanger/scplan/internal/rules/release_24_3/dep_add_index_and_constraint.go
deleted file mode 100644
index dbce06fa39fc..000000000000
--- a/pkg/sql/schemachanger/scplan/internal/rules/release_24_3/dep_add_index_and_constraint.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2022 The Cockroach Authors.
-//
-// Use of this software is governed by the CockroachDB Software License
-// included in the /LICENSE file.
-
-package release_24_3
-
-import (
- "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/rel"
- "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scpb"
- . "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/internal/rules"
- "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/internal/scgraph"
- "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/screl"
-)
-
-// These rules ensure that indexes and constraints on those indexes come
-// into existence in the appropriate order.
-func init() {
- registerDepRule(
- "index is ready to be validated before we validate constraint on it",
- scgraph.Precedence,
- "index", "constraint",
- func(from, to NodeVars) rel.Clauses {
- return rel.Clauses{
- from.Type((*scpb.PrimaryIndex)(nil)),
- to.TypeFilter(rulesVersionKey, isNonIndexBackedConstraint, isSubjectTo2VersionInvariant),
- JoinOnDescID(from, to, "table-id"),
- JoinOn(
- from, screl.IndexID,
- to, screl.IndexID,
- "index-id-for-validation",
- ),
- StatusesToPublicOrTransient(from, scpb.Status_VALIDATED, to, scpb.Status_VALIDATED),
- }
- },
- )
-}
diff --git a/pkg/sql/schemachanger/scplan/internal/rules/release_24_3/dep_alter_column_type.go b/pkg/sql/schemachanger/scplan/internal/rules/release_24_3/dep_alter_column_type.go
deleted file mode 100644
index 0fcca7eac958..000000000000
--- a/pkg/sql/schemachanger/scplan/internal/rules/release_24_3/dep_alter_column_type.go
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2022 The Cockroach Authors.
-//
-// Use of this software is governed by the CockroachDB Software License
-// included in the /LICENSE file.
-
-package release_24_3
-
-import (
- "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/rel"
- "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scpb"
- . "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/internal/rules"
- "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/internal/scgraph"
- "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/screl"
-)
-
-// This rule ensures that when changing the column type, the old column
-// type is dropped before the new one is added.
-func init() {
- registerDepRule(
- "column type update is decomposed as a drop then add",
- scgraph.Precedence,
- "old-column-type", "new-column-type",
- func(from, to NodeVars) rel.Clauses {
- return rel.Clauses{
- from.Type((*scpb.ColumnType)(nil)),
- to.Type((*scpb.ColumnType)(nil)),
- JoinOnColumnID(from, to, "table-id", "col-id"),
- from.TargetStatus(scpb.ToAbsent),
- from.CurrentStatus(scpb.Status_PUBLIC),
- to.TargetStatus(scpb.ToPublic),
- to.CurrentStatus(scpb.Status_ABSENT),
- }
- },
- )
-
- registerDepRule(
- "column type is changed to public after doing validation of a transient check constraint",
- scgraph.SameStagePrecedence,
- "transient-check-constraint", "column-type",
- func(from, to NodeVars) rel.Clauses {
- colID := rel.Var("columnID")
- return rel.Clauses{
- from.Type((*scpb.CheckConstraint)(nil)),
- to.Type((*scpb.ColumnType)(nil)),
- JoinOnDescID(from, to, "table-id"),
- to.El.AttrEqVar(screl.ColumnID, colID),
- from.ReferencedColumnIDsContains(colID),
- from.TargetStatus(scpb.Transient),
- to.TargetStatus(scpb.ToPublic),
- from.CurrentStatus(scpb.Status_TRANSIENT_VALIDATED),
- to.CurrentStatus(scpb.Status_PUBLIC),
- }
- },
- )
-}
diff --git a/pkg/sql/schemachanger/scplan/internal/rules/release_24_3/dep_create.go b/pkg/sql/schemachanger/scplan/internal/rules/release_24_3/dep_create.go
deleted file mode 100644
index 54b3590a256f..000000000000
--- a/pkg/sql/schemachanger/scplan/internal/rules/release_24_3/dep_create.go
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright 2023 The Cockroach Authors.
-//
-// Use of this software is governed by the CockroachDB Software License
-// included in the /LICENSE file.
-
-package release_24_3
-
-import (
- "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/rel"
- "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scpb"
- . "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/internal/rules"
- "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/internal/scgraph"
-)
-
-func init() {
- registerDepRule(
- "descriptor existence precedes dependents",
- scgraph.Precedence,
- "relation", "dependent",
- func(from, to NodeVars) rel.Clauses {
- return rel.Clauses{
- from.TypeFilter(rulesVersionKey, isDescriptor),
- to.TypeFilter(rulesVersionKey, Not(isDescriptor)),
- JoinOnDescID(from, to, "relation-id"),
- StatusesToPublicOrTransient(from, scpb.Status_DESCRIPTOR_ADDED, to, scpb.Status_PUBLIC),
- }
- },
- )
-
- registerDepRule(
- "dependents exist before descriptor becomes public",
- scgraph.Precedence,
- "dependent", "relation",
- func(from, to NodeVars) rel.Clauses {
- return rel.Clauses{
- from.TypeFilter(rulesVersionKey, Not(isDescriptor), Not(isData)),
- to.TypeFilter(rulesVersionKey, isDescriptor),
- JoinOnDescID(from, to, "relation-id"),
- StatusesToPublicOrTransient(from, scpb.Status_PUBLIC, to, scpb.Status_PUBLIC),
- }
- },
- )
-}
-
-func init() {
- registerDepRule(
- "namespace exist before schema parent",
- scgraph.Precedence,
- "dependent", "relation",
- func(from, to NodeVars) rel.Clauses {
- return rel.Clauses{
- from.Type((*scpb.Namespace)(nil)),
- to.Type((*scpb.SchemaParent)(nil)),
- JoinOnDescID(from, to, "schema-id"),
- StatusesToPublicOrTransient(from, scpb.Status_PUBLIC, to, scpb.Status_PUBLIC),
- }
- },
- )
-}
diff --git a/pkg/sql/schemachanger/scplan/internal/rules/release_24_3/dep_create_function.go b/pkg/sql/schemachanger/scplan/internal/rules/release_24_3/dep_create_function.go
deleted file mode 100644
index 80cab0590704..000000000000
--- a/pkg/sql/schemachanger/scplan/internal/rules/release_24_3/dep_create_function.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2023 The Cockroach Authors.
-//
-// Use of this software is governed by the CockroachDB Software License
-// included in the /LICENSE file.
-
-package release_24_3
-
-import (
- "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/rel"
- "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scpb"
- . "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/internal/rules"
- "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/internal/scgraph"
-)
-
-func init() {
- // When setting object parent IDs, we need to add the function to its schema;
- // a function name is needed for this.
- registerDepRule(
- "function name should be set before parent ids",
- scgraph.Precedence,
- "function-name", "function-parent",
- func(from, to NodeVars) rel.Clauses {
- return rel.Clauses{
- from.Type((*scpb.FunctionName)(nil)),
- to.Type((*scpb.SchemaChild)(nil)),
- JoinOnDescID(from, to, "function-id"),
- StatusesToPublicOrTransient(from, scpb.Status_PUBLIC, to, scpb.Status_PUBLIC),
- }
- },
- )
-}
diff --git a/pkg/sql/schemachanger/scplan/internal/rules/release_24_3/dep_drop_column.go b/pkg/sql/schemachanger/scplan/internal/rules/release_24_3/dep_drop_column.go
deleted file mode 100644
index 194fbbd6cb07..000000000000
--- a/pkg/sql/schemachanger/scplan/internal/rules/release_24_3/dep_drop_column.go
+++ /dev/null
@@ -1,261 +0,0 @@
-// Copyright 2022 The Cockroach Authors.
-//
-// Use of this software is governed by the CockroachDB Software License
-// included in the /LICENSE file.
-
-package release_24_3
-
-import (
- "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/rel"
- "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scpb"
- . "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/internal/rules"
- "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/internal/scgraph"
-)
-
-// These rules ensure that column-dependent elements, like a column's name, its
-// DEFAULT expression, etc. disappear once the column reaches a suitable state.
-func init() {
-
- // N.B. This rule is superseded by the "column constraint removed right before
- // column reaches write only" rule below for the not null column check.
- registerDepRuleForDrop(
- "column no longer public before dependents",
- scgraph.Precedence,
- "column", "dependent",
- scpb.Status_WRITE_ONLY, scpb.Status_ABSENT,
- func(from, to NodeVars) rel.Clauses {
- return rel.Clauses{
- from.Type((*scpb.Column)(nil)),
- to.TypeFilter(rulesVersionKey, isColumnDependent, Not(isColumnNotNull)),
- JoinOnColumnID(from, to, "table-id", "col-id"),
- }
- },
- )
-
- registerDepRuleForDrop(
- "dependents removed before column",
- scgraph.Precedence,
- "dependent", "column",
- scpb.Status_ABSENT, scpb.Status_ABSENT,
- func(from, to NodeVars) rel.Clauses {
- return rel.Clauses{
- from.TypeFilter(rulesVersionKey, isColumnDependent),
- to.Type((*scpb.Column)(nil)),
- JoinOnColumnID(from, to, "table-id", "col-id"),
- }
- },
- )
-
- registerDepRule(
- "column type removed before column family",
- scgraph.Precedence,
- "column-type", "column-family",
- func(from, to NodeVars) rel.Clauses {
- return rel.Clauses{
- from.Type((*scpb.ColumnType)(nil)),
- to.Type((*scpb.ColumnFamily)(nil)),
- JoinOnColumnFamilyID(from, to, "table-id", "family-id"),
- StatusesToAbsent(from, scpb.Status_ABSENT, to, scpb.Status_ABSENT),
- }
- },
- )
-}
-
-// Special cases of the above.
-func init() {
-
- registerDepRule(
- "column type dependents removed right before column type",
- scgraph.SameStagePrecedence,
- "dependent", "column-type",
- func(from, to NodeVars) rel.Clauses {
- return rel.Clauses{
- from.TypeFilter(rulesVersionKey, isColumnTypeDependent),
- to.Type((*scpb.ColumnType)(nil)),
- JoinOnColumnID(from, to, "table-id", "col-id"),
- StatusesToAbsent(from, scpb.Status_ABSENT, to, scpb.Status_ABSENT),
- }
- },
- )
-
- // Special cases for removal of column types, which hold references to other
- // descriptors.
- //
- // When the whole table is dropped, we can (and in fact, should) remove these
- // right away in-txn. However, when only the column is dropped but the table
- // remains, we need to wait until the column is DELETE_ONLY, which happens
- // post-commit because of the need to uphold the 2-version invariant.
- //
- // We distinguish the two cases using a flag in ColumnType which is set iff
- // the parent relation is dropped. This is a dirty hack; ideally we should be
- // able to express the _absence_ of a target element as a query clause.
- //
- // Note that DEFAULT and ON UPDATE expressions are column-dependent elements
- // which also hold references to other descriptors. The rule prior to this one
- // ensures that they transition to ABSENT before scpb.ColumnType does.
- registerDepRule(
- "column type removed right before column when not dropping relation",
- scgraph.SameStagePrecedence,
- "column-type", "column",
- func(from, to NodeVars) rel.Clauses {
- return rel.Clauses{
- from.Type((*scpb.ColumnType)(nil)),
- descriptorIsNotBeingDropped(from.El),
- to.Type((*scpb.Column)(nil)),
- JoinOnColumnID(from, to, "table-id", "col-id"),
- StatusesToAbsent(from, scpb.Status_ABSENT, to, scpb.Status_ABSENT),
- }
- },
- )
-
- // This rule ensures that a column is dropped only after any computed column
- // dependent on it is dropped. For example, if column B is a computed column
- // using column A in its compute expression, this rule ensures that the
- // compute expression of B is dropped before column A is dropped. The rules
- // above ensure that column B is dropped before the expression is dropped,
- // so this rule implicitly ensures that column B is dropped before column
- // A. This is relevant for expression and hash indexes which create an
- // internal, virtual column that computes the hash/expression key for the index.
- //
- // N.B. Originally, this rule was specific only to virtual, computed columns.
- // The rationale was that it was needed due to an edge case within the
- // optimizer. The optimizer allows the compute expression of virtual computed
- // columns to be evaluated during an active schema change. Without this rule,
- // the optimizer cannot read the dependent column as the dependent column
- // moves to the WRITE_ONLY stage before the computed column is fully dropped.
- //
- // However, it is now needed for all compute expressions. When altering a
- // column's type such that a backfill is required, a new version of the column
- // is added, and the old version is dropped. A temporary compute expression is
- // set to map the old rows to the new column type. This expression is dropped
- // *before* dropping the old column, which this rule helps to enforce.
- registerDepRuleForDrop(
- "Computed column expression is dropped before the column it depends on",
- scgraph.Precedence,
- "column-expr", "column",
- scpb.Status_ABSENT, scpb.Status_WRITE_ONLY,
- func(from, to NodeVars) rel.Clauses {
- return rel.Clauses{
- from.Type((*scpb.ColumnComputeExpression)(nil)),
- to.Type((*scpb.Column)(nil)),
- JoinOnDescID(from, to, "table-id"),
- FilterElements("computedColumnTypeReferencesColumn", from, to,
- func(computeExpression *scpb.ColumnComputeExpression, column *scpb.Column) bool {
- for _, refColumns := range computeExpression.ReferencedColumnIDs {
- if refColumns == column.ColumnID {
- return true
- }
- }
- return false
- }),
- }
- },
- )
-
- // Column constraint disappears in the same stage as the column
- // becomes WRITE_ONLY.
- //
- // A column constraint cannot disappear while the column is still publicly
- // writable, because we would then allow incorrect writes that violate the
- // constraint.
- //
- // Column constraint cannot still be enforced when the column becomes
- // non-public because an enforced constraint means writes will see and
- // attempt to uphold it but the column is no longer visible to them.
- //
- // N.B. This rule supersedes the above "dependents removed before column" rule.
- // N.B. SameStage is enough; which transition happens first won't matter.
- registerDepRuleForDrop(
- "column constraint removed right before column reaches write only",
- scgraph.Precedence,
- "column-constraint", "column",
- scpb.Status_ABSENT, scpb.Status_WRITE_ONLY,
- func(from, to NodeVars) rel.Clauses {
- return rel.Clauses{
- from.TypeFilter(rulesVersionKey, isNonIndexBackedConstraint, isSubjectTo2VersionInvariant),
- to.Type((*scpb.Column)(nil)),
- JoinOnColumnID(from, to, "table-id", "col-id"),
- }
- },
- )
-
- // This rule enforces that a new primary index moves to the public stage only after all columns stored
- // within the old primary index move to WRITE_ONLY. Without this, the new primary index is at risk of not
- // storing all public columns within the table (as the column being dropped is still considered public
- // before it moves to WRITE_ONLY but the new primary index does not contain it since the schema changer
- // knows it is transitioning to a target status of ABSENT).
- registerDepRule(
- "New primary index should go public only after columns being dropped move to WRITE_ONLY",
- scgraph.Precedence,
- "column", "new-primary-index",
- func(from, to NodeVars) rel.Clauses {
- ic := MkNodeVars("index-column")
- relationID, columnID, indexID := rel.Var("table-id"), rel.Var("column-id"), rel.Var("index-id")
- return rel.Clauses{
- from.Type((*scpb.Column)(nil)),
- to.Type((*scpb.PrimaryIndex)(nil)),
- ColumnInSourcePrimaryIndex(ic, to, relationID, columnID, indexID),
- JoinOnColumnID(ic, from, relationID, columnID),
- from.TargetStatus(scpb.ToAbsent),
- from.CurrentStatus(scpb.Status_WRITE_ONLY),
- to.TargetStatus(scpb.ToPublic),
- to.CurrentStatus(scpb.Status_PUBLIC),
- }
- },
- )
-}
-
-// Special rules for partial predicate expressions, which ensure that any
-// columns used by them are not cleaned up before the partial index predicate
-// is removed.
-func init() {
- registerDepRuleForDrop(
- "secondary index partial no longer public before referenced column",
- scgraph.Precedence,
- "secondary-partial-index", "column",
- scpb.Status_DELETE_ONLY, scpb.Status_WRITE_ONLY,
- func(from, to NodeVars) rel.Clauses {
- return rel.Clauses{
- from.Type((*scpb.SecondaryIndex)(nil)),
- to.Type((*scpb.Column)(nil)),
- JoinOnDescID(from, to, "table-id"),
- descriptorIsNotBeingDropped(from.El),
- FilterElements("secondaryIndexReferencesColumn", from, to,
- func(index *scpb.SecondaryIndex, column *scpb.Column) bool {
- if index.EmbeddedExpr == nil {
- return false
- }
- for _, refColumns := range index.EmbeddedExpr.ReferencedColumnIDs {
- if refColumns == column.ColumnID {
- return true
- }
- }
- return false
- }),
- }
- },
- )
-
- registerDepRuleForDrop(
- "secondary index partial no longer public before referenced column",
- scgraph.Precedence,
- "secondary-partial-index", "column",
- scpb.Status_ABSENT, scpb.Status_WRITE_ONLY,
- func(from, to NodeVars) rel.Clauses {
- return rel.Clauses{
- from.Type((*scpb.SecondaryIndexPartial)(nil)),
- to.Type((*scpb.Column)(nil)),
- JoinOnDescID(from, to, "table-id"),
- descriptorIsNotBeingDropped(from.El),
- FilterElements("secondaryIndexReferencesColumn", from, to,
- func(index *scpb.SecondaryIndexPartial, column *scpb.Column) bool {
- for _, refColumns := range index.ReferencedColumnIDs {
- if refColumns == column.ColumnID {
- return true
- }
- }
- return false
- }),
- }
- },
- )
-}
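
Both rules above only bite when the partial predicate actually references the column being dropped, via the secondaryIndexReferencesColumn filters. A minimal standalone version of that containment test:

package main

import "fmt"

// partialPredicate is a toy stand-in for SecondaryIndexPartial; only the
// referenced-column list matters for the filter.
type partialPredicate struct {
	ReferencedColumnIDs []int
}

// referencesColumn mirrors the filters above: the column drop is only
// ordered after the predicate's removal when the predicate mentions it.
func referencesColumn(p partialPredicate, columnID int) bool {
	for _, id := range p.ReferencedColumnIDs {
		if id == columnID {
			return true
		}
	}
	return false
}

func main() {
	pred := partialPredicate{ReferencedColumnIDs: []int{2, 5}}
	fmt.Println(referencesColumn(pred, 5)) // true: the ordering rule applies
	fmt.Println(referencesColumn(pred, 7)) // false: the rule does not constrain
}
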
diff --git a/pkg/sql/schemachanger/scplan/internal/rules/release_24_3/dep_drop_constraint.go b/pkg/sql/schemachanger/scplan/internal/rules/release_24_3/dep_drop_constraint.go
deleted file mode 100644
index 90e64b3c01f0..000000000000
--- a/pkg/sql/schemachanger/scplan/internal/rules/release_24_3/dep_drop_constraint.go
+++ /dev/null
@@ -1,96 +0,0 @@
-// Copyright 2022 The Cockroach Authors.
-//
-// Use of this software is governed by the CockroachDB Software License
-// included in the /LICENSE file.
-
-package release_24_3
-
-import (
- "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/rel"
- "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scpb"
- . "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/internal/rules"
- "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/internal/scgraph"
-)
-
-// These rules ensure that constraint-dependent elements, like a constraint's
-// name, etc. disappear once the constraint reaches a suitable state.
-func init() {
-
- registerDepRuleForDrop(
- "constraint no longer public before dependents",
- scgraph.Precedence,
- "constraint", "dependent",
- scpb.Status_VALIDATED, scpb.Status_ABSENT,
- func(from, to NodeVars) rel.Clauses {
- return rel.Clauses{
- from.TypeFilter(rulesVersionKey, isNonIndexBackedConstraint, isSubjectTo2VersionInvariant),
- to.TypeFilter(rulesVersionKey, isConstraintDependent, Not(isConstraintWithoutIndexName)),
- JoinOnConstraintID(from, to, "table-id", "constraint-id"),
- }
- },
- )
-
- registerDepRuleForDrop(
- "dependents removed before constraint",
- scgraph.Precedence,
- "dependents", "constraint",
- scpb.Status_ABSENT, scpb.Status_ABSENT,
- func(from, to NodeVars) rel.Clauses {
- return rel.Clauses{
- from.TypeFilter(rulesVersionKey, isConstraintDependent, Not(isConstraintWithoutIndexName)),
- to.TypeFilter(rulesVersionKey, isNonIndexBackedConstraint, isSubjectTo2VersionInvariant),
- JoinOnConstraintID(from, to, "table-id", "constraint-id"),
- }
- },
- )
-}
-
-// These rules apply to simple constraints and ensure that their dependents, like
-// their names, comments, etc., disappear right before the simple constraint.
-func init() {
-
- registerDepRuleForDrop(
- "dependents removed right before simple constraint",
- scgraph.SameStagePrecedence,
- "dependents", "constraint",
- scpb.Status_ABSENT, scpb.Status_ABSENT,
- func(from, to NodeVars) rel.Clauses {
- return rel.Clauses{
- from.TypeFilter(rulesVersionKey, isConstraintDependent, Not(isConstraintWithoutIndexName)),
- to.TypeFilter(rulesVersionKey, isNonIndexBackedConstraint, Not(isSubjectTo2VersionInvariant)),
- JoinOnConstraintID(from, to, "table-id", "constraint-id"),
- }
- },
- )
-
- // Constraint name should be cleared right before the constraint is no
- // longer visible.
- registerDepRuleForDrop(
- "Constraint should be hidden before name",
- scgraph.Precedence,
- "constraint-name", "constraint",
- scpb.Status_ABSENT, scpb.Status_ABSENT,
- func(from, to NodeVars) rel.Clauses {
- return rel.Clauses{
- from.Type((*scpb.ConstraintWithoutIndexName)(nil)),
- to.TypeFilter(rulesVersionKey, isNonIndexBackedConstraint),
- JoinOnConstraintID(from, to, "table-id", "constraint-id"),
- }
- },
- )
- // The constraint should be validated before we attempt to clean up the
- // constraint name.
- registerDepRuleForDrop(
- "Constraint should be hidden before name",
- scgraph.Precedence,
- "constraint", "constraint-name",
- scpb.Status_VALIDATED, scpb.Status_ABSENT,
- func(from, to NodeVars) rel.Clauses {
- return rel.Clauses{
- from.TypeFilter(rulesVersionKey, isNonIndexBackedConstraint),
- to.Type((*scpb.ConstraintWithoutIndexName)(nil)),
- JoinOnConstraintID(from, to, "table-id", "constraint-id"),
- }
- },
- )
-}
diff --git a/pkg/sql/schemachanger/scplan/internal/rules/release_24_3/dep_drop_index.go b/pkg/sql/schemachanger/scplan/internal/rules/release_24_3/dep_drop_index.go
deleted file mode 100644
index 4aafe10ace8b..000000000000
--- a/pkg/sql/schemachanger/scplan/internal/rules/release_24_3/dep_drop_index.go
+++ /dev/null
@@ -1,205 +0,0 @@
-// Copyright 2022 The Cockroach Authors.
-//
-// Use of this software is governed by the CockroachDB Software License
-// included in the /LICENSE file.
-
-package release_24_3
-
-import (
- "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/rel"
- "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scpb"
- . "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/internal/rules"
- "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/internal/scgraph"
-)
-
-// These rules ensure that index-dependent elements, like an index's name, its
-// partitioning, etc. disappear once the index reaches a suitable state.
-func init() {
-
- // For a column to be removed from an index, the index must be validated,
- // which will not happen for temporary ones.
- registerDepRuleForDrop(
- "index no longer public before dependents, excluding columns",
- scgraph.Precedence,
- "index", "dependent",
- scpb.Status_VALIDATED, scpb.Status_ABSENT,
- func(from, to NodeVars) rel.Clauses {
- return rel.Clauses{
- from.TypeFilter(rulesVersionKey, isIndex),
- to.TypeFilter(rulesVersionKey, isIndexDependent, Not(isIndexColumn)),
- JoinOnIndexID(from, to, "table-id", "index-id"),
- }
- },
- )
- // For temporary indexes we have to wait until DELETE_ONLY for primary index
- // swaps, as the temporary index transitions into a drop state. Normally these
- // get optimized out, so it should be safe to wait longer for all index types.
- registerDepRuleForDrop(
- "index drop mutation visible before cleaning up index columns",
- scgraph.Precedence,
- "index", "dependent",
- scpb.Status_DELETE_ONLY, scpb.Status_ABSENT,
- func(from, to NodeVars) rel.Clauses {
- return rel.Clauses{
- from.TypeFilter(rulesVersionKey, isIndex),
- to.TypeFilter(rulesVersionKey, isIndexColumn),
- JoinOnIndexID(from, to, "table-id", "index-id"),
- }
- },
- )
- registerDepRuleForDrop(
- "dependents removed before index",
- scgraph.Precedence,
- "dependent", "index",
- scpb.Status_ABSENT, scpb.Status_ABSENT,
- func(from, to NodeVars) rel.Clauses {
- return rel.Clauses{
- from.TypeFilter(rulesVersionKey, isIndexDependent),
- to.TypeFilter(rulesVersionKey, isIndex),
- JoinOnIndexID(from, to, "table-id", "index-id"),
- }
- },
- )
-
-	// This rule helps us keep the index name available for event log entries.
- registerDepRuleForDrop(
- "index no longer public before index name",
- scgraph.Precedence,
- "index", "name",
- scpb.Status_DELETE_ONLY, scpb.Status_ABSENT,
- func(from, to NodeVars) rel.Clauses {
- return rel.Clauses{
- from.Type((*scpb.SecondaryIndex)(nil)),
- to.Type((*scpb.IndexName)(nil)),
- JoinOnIndexID(from, to, "table-id", "index-id"),
- }
- },
- )
-}
-
-// Special cases of the above.
-func init() {
-
- // If we're going to be removing columns from an index, we know that
- // it'll be because we're dropping the index. If we're dropping the
- // index and not the descriptor, we need to make sure that we only
- // do it once the index is definitely being dropped. The reason for
- // this is roundabout: dropping a column from an index which is itself
- // being dropped is treated as a no-op by the op rules.
- //
- // TODO(ajwerner): This rule really feels like it ought to be a
- // same stage precedence sort of rule where we remove the columns from the
- // index when we remove the index, but for some reason, that overconstrains
- // the graph when dropping the table. Because of that, we allow the column
- // to be removed from the index in DELETE_ONLY, and we no-op the removal.
- registerDepRuleForDrop(
- "remove columns from index right before removing index",
- scgraph.Precedence,
- "index-column", "index",
- scpb.Status_DELETE_ONLY, scpb.Status_ABSENT,
- func(from, to NodeVars) rel.Clauses {
- return rel.Clauses{
- from.Type((*scpb.IndexColumn)(nil)),
- to.TypeFilter(rulesVersionKey, isIndex),
- JoinOnIndexID(from, to, "table-id", "index-id"),
- }
- },
- )
-
- // Special case for removal of partial predicates, which hold references to
- // other descriptors.
- //
- // When the whole table is dropped, we can (and in fact, should) remove these
- // right away in-txn. However, when only the index is dropped but the table
- // remains, we need to wait until the index is DELETE_ONLY, which happens
- // post-commit because of the need to uphold the 2-version invariant.
- //
-	// We distinguish the two cases using a flag in SecondaryIndexPartial which is
-	// set iff the parent relation is dropped. This is a dirty hack; ideally we
-	// would be able to express the _absence_ of a target element as a query
-	// clause.
- registerDepRuleForDrop(
- "partial predicate removed right before secondary index when not dropping relation",
- scgraph.SameStagePrecedence,
- "partial-predicate", "index",
- scpb.Status_ABSENT, scpb.Status_ABSENT,
- func(from, to NodeVars) rel.Clauses {
- return rel.Clauses{
- from.Type((*scpb.SecondaryIndexPartial)(nil)),
- descriptorIsNotBeingDropped(from.El),
- to.Type((*scpb.SecondaryIndex)(nil)),
- JoinOnIndexID(from, to, "table-id", "index-id"),
- }
- },
- )
-
-}
-
-func init() {
-	// TODO(fqazi): We need to model these rules better to use indexes,
-	// since they may perform terribly in scenarios where we are dropping
-	// a large number of views and indexes (i.e. O(views * indexes)).
- registerDepRuleForDrop(
- "dependent view no longer public before secondary index",
- scgraph.Precedence,
- "view", "index",
- scpb.Status_DROPPED, scpb.Status_VALIDATED,
- func(from, to NodeVars) rel.Clauses {
- return rel.Clauses{
- from.Type((*scpb.View)(nil)),
- to.Type((*scpb.SecondaryIndex)(nil)),
- FilterElements("viewReferencesIndex", from, to, func(from *scpb.View, to *scpb.SecondaryIndex) bool {
- for _, ref := range from.ForwardReferences {
- if ref.ToID == to.TableID &&
- ref.IndexID == to.IndexID {
- return true
- }
- }
- return false
- }),
- }
- },
- )
- registerDepRuleForDrop(
- "secondary index should be validated before dependent view can be absent",
- scgraph.Precedence,
- "index", "view",
- scpb.Status_VALIDATED, scpb.Status_ABSENT,
- func(from, to NodeVars) rel.Clauses {
- return rel.Clauses{
- from.Type((*scpb.SecondaryIndex)(nil)),
- to.Type((*scpb.View)(nil)),
- FilterElements("viewReferencesIndex", from, to, func(from *scpb.SecondaryIndex, to *scpb.View) bool {
- for _, ref := range to.ForwardReferences {
- if ref.ToID == from.TableID &&
- ref.IndexID == from.IndexID {
- return true
- }
- }
- return false
- }),
- }
- },
- )
- registerDepRuleForDrop(
- "dependent view absent before secondary index",
- scgraph.Precedence,
- "view", "index",
- scpb.Status_ABSENT, scpb.Status_ABSENT,
- func(from, to NodeVars) rel.Clauses {
- return rel.Clauses{
- from.Type((*scpb.View)(nil)),
- to.Type((*scpb.SecondaryIndex)(nil)),
- FilterElements("viewReferencesIndex", from, to, func(from *scpb.View, to *scpb.SecondaryIndex) bool {
- for _, ref := range from.ForwardReferences {
- if ref.ToID == to.TableID &&
- ref.IndexID == to.IndexID {
- return true
- }
- }
- return false
- }),
- }
- },
- )
-}
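
The three view/index rules above all share one join predicate: a view depends on a secondary index iff one of the view's forward references names that index. A standalone sketch of the check, with hypothetical structs standing in for the scpb types:

```go
package main

import "fmt"

// Hypothetical stand-ins for the scpb types used by the rules above.
type ForwardReference struct{ ToID, IndexID int64 }
type View struct{ ForwardReferences []ForwardReference }
type SecondaryIndex struct{ TableID, IndexID int64 }

// viewReferencesIndex mirrors the FilterElements closures above: a view
// depends on an index iff one of its forward references names that
// index's table ID and index ID.
func viewReferencesIndex(v View, idx SecondaryIndex) bool {
	for _, ref := range v.ForwardReferences {
		if ref.ToID == idx.TableID && ref.IndexID == idx.IndexID {
			return true
		}
	}
	return false
}

func main() {
	v := View{ForwardReferences: []ForwardReference{{ToID: 104, IndexID: 2}}}
	fmt.Println(viewReferencesIndex(v, SecondaryIndex{TableID: 104, IndexID: 2})) // true
	fmt.Println(viewReferencesIndex(v, SecondaryIndex{TableID: 104, IndexID: 3})) // false
}
```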
diff --git a/pkg/sql/schemachanger/scplan/internal/rules/release_24_3/dep_drop_index_and_column.go b/pkg/sql/schemachanger/scplan/internal/rules/release_24_3/dep_drop_index_and_column.go
deleted file mode 100644
index 6edc1a4b5c85..000000000000
--- a/pkg/sql/schemachanger/scplan/internal/rules/release_24_3/dep_drop_index_and_column.go
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright 2022 The Cockroach Authors.
-//
-// Use of this software is governed by the CockroachDB Software License
-// included in the /LICENSE file.
-
-package release_24_3
-
-import (
- "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/rel"
- "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scpb"
- . "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/internal/rules"
- "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/internal/scgraph"
-)
-
-// These rules ensure that columns, and the indexes containing those columns,
-// disappear in the correct order.
-func init() {
-
- // Without this rule, we might have an index which exists and contains
- // a column which does not exist. This would lead to panics inside the
- // optimizer and an invalid table descriptor.
- registerDepRuleForDrop("indexes containing column reach absent before column",
- scgraph.Precedence,
- "index", "column",
- scpb.Status_ABSENT, scpb.Status_ABSENT,
- func(from, to NodeVars) rel.Clauses {
- ic := MkNodeVars("index-column")
- relationID, columnID := rel.Var("table-id"), rel.Var("column-id")
- return rel.Clauses{
- from.Type((*scpb.PrimaryIndex)(nil), (*scpb.SecondaryIndex)(nil)),
- to.Type((*scpb.Column)(nil)),
- ColumnInIndex(ic, from, relationID, columnID, "index-id"),
- JoinOnColumnID(ic, to, relationID, columnID),
- descriptorIsNotBeingDropped(ic.El),
- }
- })
-
- registerDepRule("secondary indexes containing column as key reach write-only before column",
- scgraph.Precedence,
- "index", "column",
- func(from, to NodeVars) rel.Clauses {
- ic := MkNodeVars("index-column")
- relationID, columnID := rel.Var("table-id"), rel.Var("column-id")
- return rel.Clauses{
- from.Type((*scpb.SecondaryIndex)(nil)),
- to.Type((*scpb.Column)(nil)),
- ColumnInIndex(ic, from, relationID, columnID, "index-id"),
- JoinOnColumnID(ic, to, relationID, columnID),
- StatusesToAbsent(from, scpb.Status_VALIDATED, to, scpb.Status_WRITE_ONLY),
- descriptorIsNotBeingDropped(ic.El),
- rel.Filter("isIndexKeyColumnKey", ic.El)(
- func(ic *scpb.IndexColumn) bool {
- return ic.Kind == scpb.IndexColumn_KEY || ic.Kind == scpb.IndexColumn_KEY_SUFFIX
- },
- ),
- }
- })
-
-	// This rule ensures that if we are dropping a column, we only transition it to
-	// non-public after all adding indexes are present in the table descriptor
-	// (i.e. all adding indexes have reached BACKFILL_ONLY).
-	// This matters when we are dropping a column but have intermediate primary
-	// indexes; we must delay transitioning the column to non-public until all
-	// primary indexes have been added to the table descriptor as mutations.
- registerDepRule("all adding indexes reached BACKFILL_ONLY before any of their columns disappear",
- scgraph.Precedence,
- "index", "column",
- func(from, to NodeVars) rel.Clauses {
- ic := MkNodeVars("index-column")
- relationID, columnID := rel.Var("table-id"), rel.Var("column-id")
- return rel.Clauses{
- from.Type((*scpb.PrimaryIndex)(nil), (*scpb.SecondaryIndex)(nil)),
- to.Type((*scpb.Column)(nil)),
- ColumnInIndex(ic, from, relationID, columnID, "index-id"),
- JoinOnColumnID(ic, to, relationID, columnID),
- from.TargetStatus(scpb.ToPublic, scpb.Transient),
- from.CurrentStatus(scpb.Status_BACKFILL_ONLY),
- to.TargetStatus(scpb.ToAbsent),
- to.CurrentStatus(scpb.Status_WRITE_ONLY),
- }
- })
-}
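
To make concrete what the first rule above protects against, here is a toy validator for the invariant "every column referenced by a live index still exists"; the structs are illustrative, not the real descriptor types:

```go
package main

import "fmt"

// tableDesc is a toy model of a table descriptor: a set of live columns
// and the key columns of each index.
type tableDesc struct {
	columns map[string]bool     // live columns
	indexes map[string][]string // index name -> key columns
}

// validate fails if any index references a column that no longer exists,
// which is the corrupt state the dep rule rules out.
func validate(t tableDesc) error {
	for idx, cols := range t.indexes {
		for _, c := range cols {
			if !t.columns[c] {
				return fmt.Errorf("index %q references dropped column %q", idx, c)
			}
		}
	}
	return nil
}

func main() {
	t := tableDesc{
		columns: map[string]bool{"a": true}, // "b" was dropped too early
		indexes: map[string][]string{"t_b_idx": {"b"}},
	}
	fmt.Println(validate(t)) // index "t_b_idx" references dropped column "b"
}
```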
diff --git a/pkg/sql/schemachanger/scplan/internal/rules/release_24_3/dep_drop_object.go b/pkg/sql/schemachanger/scplan/internal/rules/release_24_3/dep_drop_object.go
deleted file mode 100644
index 32b095824f0a..000000000000
--- a/pkg/sql/schemachanger/scplan/internal/rules/release_24_3/dep_drop_object.go
+++ /dev/null
@@ -1,295 +0,0 @@
-// Copyright 2022 The Cockroach Authors.
-//
-// Use of this software is governed by the CockroachDB Software License
-// included in the /LICENSE file.
-
-package release_24_3
-
-import (
- "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/rel"
- "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scpb"
- . "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/internal/rules"
- "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/internal/scgraph"
- "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/screl"
-)
-
-// These rules ensure that:
-// - a descriptor reaches ABSENT in a different transaction than it reaches
-// DROPPED (i.e. it cannot be removed until PostCommit).
-// - a descriptor element reaches the DROPPED state in the txn before
-//   its dependent elements (namespace entry, comments, column names, etc.)
-// reach the ABSENT state;
-// - or the WRITE_ONLY state for those dependent elements subject to the
-// 2-version invariant.
-func init() {
-
- registerDepRule(
- "descriptor dropped in transaction before removal",
- scgraph.PreviousTransactionPrecedence,
- "dropped", "absent",
- func(from, to NodeVars) rel.Clauses {
- return rel.Clauses{
- from.TypeFilter(rulesVersionKey, isDescriptor),
- from.El.AttrEqVar(screl.DescID, "_"),
- from.El.AttrEqVar(rel.Self, to.El),
- StatusesToAbsent(from, scpb.Status_DROPPED, to, scpb.Status_ABSENT),
- }
- })
-
- registerDepRule(
- "descriptor dropped before dependent element removal",
- scgraph.Precedence,
- "descriptor", "dependent",
- func(from, to NodeVars) rel.Clauses {
- return rel.Clauses{
- from.TypeFilter(rulesVersionKey, isDescriptor),
- to.TypeFilter(rulesVersionKey, Or(isSimpleDependent, isOwner), Not(isConstraintDependent)),
- JoinOnDescID(from, to, "desc-id"),
- StatusesToAbsent(from, scpb.Status_DROPPED, to, scpb.Status_ABSENT),
- }
- })
-
- registerDepRule(
- "relation dropped before dependent column",
- scgraph.Precedence,
- "descriptor", "column",
- func(from, to NodeVars) rel.Clauses {
- return rel.Clauses{
- from.Type((*scpb.Table)(nil), (*scpb.View)(nil), (*scpb.Sequence)(nil)),
- to.TypeFilter(rulesVersionKey, isColumn),
- JoinOnDescID(from, to, "desc-id"),
- StatusesToAbsent(from, scpb.Status_DROPPED, to, scpb.Status_WRITE_ONLY),
- }
- })
-
- registerDepRule(
- "relation dropped before dependent index",
- scgraph.Precedence,
- "descriptor", "index",
- func(from, to NodeVars) rel.Clauses {
- return rel.Clauses{
- from.Type((*scpb.Table)(nil), (*scpb.View)(nil)),
- to.TypeFilter(rulesVersionKey, isIndex),
- JoinOnDescID(from, to, "desc-id"),
- StatusesToAbsent(from, scpb.Status_DROPPED, to, scpb.Status_VALIDATED),
- }
- },
- )
-
- registerDepRule(
- "relation dropped before dependent constraint",
- scgraph.Precedence,
- "descriptor", "constraint",
- func(from, to NodeVars) rel.Clauses {
- return rel.Clauses{
- from.Type((*scpb.Table)(nil)),
- to.TypeFilter(rulesVersionKey, isNonIndexBackedConstraint, isSubjectTo2VersionInvariant, Not(isNonIndexBackedCrossDescriptorConstraint)),
- JoinOnDescID(from, to, "desc-id"),
- StatusesToAbsent(from, scpb.Status_DROPPED, to, scpb.Status_VALIDATED),
- }
- },
- )
-
-}
-
-// These rules ensure that cross-referencing simple dependent elements reach
-// ABSENT in the same stage right after the referenced descriptor element
-// reaches DROPPED.
-//
-// References from simple dependent elements to other descriptors exist as
-// follows:
-// - simple dependent elements with a ReferencedDescID attribute,
-// - those which embed a TypeT,
-// - those which embed an Expression.
-func init() {
-
- registerDepRule(
- "descriptor drop right before removing dependent with attr ref",
- scgraph.SameStagePrecedence,
- "referenced-descriptor", "referencing-via-attr",
- func(from, to NodeVars) rel.Clauses {
- return rel.Clauses{
- from.TypeFilter(rulesVersionKey, isDescriptor),
- to.TypeFilter(rulesVersionKey, isSimpleDependent, Not(isDescriptorParentReference)),
- JoinReferencedDescID(to, from, "desc-id"),
- StatusesToAbsent(from, scpb.Status_DROPPED, to, scpb.Status_ABSENT),
- }
- },
- )
-
-	// If the descriptor referencing this type is already being dropped, then
-	// the back-references don't really matter.
- registerDepRule(
- "descriptor drop right before removing dependent between types",
- scgraph.SameStagePrecedence,
- "referenced-descriptor", "referencing-via-type",
- func(from, to NodeVars) rel.Clauses {
- fromDescID := rel.Var("fromDescID")
- return rel.Clauses{
- from.TypeFilter(rulesVersionKey, isTypeDescriptor),
- from.DescIDEq(fromDescID),
- to.ReferencedTypeDescIDsContain(fromDescID),
- to.TypeFilter(rulesVersionKey, isSimpleDependent, isWithTypeT),
- StatusesToAbsent(from, scpb.Status_DROPPED, to, scpb.Status_ABSENT),
- }
- },
- )
- registerDepRule(
- "descriptor drop right before removing dependent with type refs in expressions",
- scgraph.SameStagePrecedence,
- "referenced-descriptor", "referencing-via-type",
- func(from, to NodeVars) rel.Clauses {
- fromDescID := rel.Var("fromDescID")
- return rel.Clauses{
- from.TypeFilter(rulesVersionKey, isTypeDescriptor),
- from.DescIDEq(fromDescID),
- to.ReferencedTypeDescIDsContain(fromDescID),
- descriptorIsNotBeingDropped(to.El),
- to.TypeFilter(rulesVersionKey, isSimpleDependent, isWithExpression),
- StatusesToAbsent(from, scpb.Status_DROPPED, to, scpb.Status_ABSENT),
- }
- },
- )
-
- registerDepRule(
- "descriptor drop right before removing dependent with expr ref to sequence",
- scgraph.SameStagePrecedence,
- "referenced-descriptor", "referencing-via-expr",
- func(from, to NodeVars) rel.Clauses {
- seqID := rel.Var("seqID")
- return rel.Clauses{
- from.Type((*scpb.Sequence)(nil)),
- from.DescIDEq(seqID),
- to.ReferencedSequenceIDsContains(seqID),
- to.TypeFilter(rulesVersionKey, isSimpleDependent, isWithExpression),
- StatusesToAbsent(from, scpb.Status_DROPPED, to, scpb.Status_ABSENT),
- }
- },
- )
-
- registerDepRule(
- "descriptor drop right before removing dependent with function refs in columns",
- scgraph.SameStagePrecedence,
- "referenced-descriptor", "referencing-via-function",
- func(from, to NodeVars) rel.Clauses {
- fromDescID := rel.Var("fromDescID")
- return rel.Clauses{
- from.Type((*scpb.Function)(nil)),
- from.DescIDEq(fromDescID),
- to.ReferencedFunctionIDsContains(fromDescID),
- to.TypeFilter(rulesVersionKey, isSimpleDependent, isWithExpression),
- StatusesToAbsent(from, scpb.Status_DROPPED, to, scpb.Status_ABSENT),
- }
- },
- )
-
-}
-
-// These rules ensure that descriptor, back-reference in parent descriptor,
-// and parent descriptor are dropped in appropriate order.
-func init() {
-
-	// We don't like those parent-descriptor-back-reference elements: in hindsight,
-	// we shouldn't have them in the first place because we cannot modify
-	// back-references in the parent descriptor in isolation with the SQL syntax.
-	// This rule deals with that fact by tightly coupling them to the descriptor.
- registerDepRule(
- "descriptor dropped right before removing back-reference in its parent descriptor",
- scgraph.SameStagePrecedence,
- "descriptor", "back-reference-in-parent-descriptor",
- func(from, to NodeVars) rel.Clauses {
- return rel.Clauses{
- from.TypeFilter(rulesVersionKey, isDescriptor),
- to.TypeFilter(rulesVersionKey, isDescriptorParentReference),
- JoinOnDescID(from, to, "desc-id"),
- StatusesToAbsent(from, scpb.Status_DROPPED, to, scpb.Status_ABSENT),
- }
- })
-
- registerDepRule(
- "back-reference in parent descriptor is removed before parent descriptor is dropped",
- scgraph.Precedence,
- "back-reference-in-parent-descriptor", "parent-descriptor",
- func(from, to NodeVars) rel.Clauses {
- return rel.Clauses{
- from.TypeFilter(rulesVersionKey, isDescriptorParentReference),
- to.TypeFilter(rulesVersionKey, isDescriptor),
- JoinReferencedDescID(from, to, "desc-id"),
- StatusesToAbsent(from, scpb.Status_ABSENT, to, scpb.Status_DROPPED),
- }
- },
- )
-}
-
-// These rules ensure that dependents get removed before the descriptor.
-// Some operations might require the descriptor to actually be present.
-func init() {
- registerDepRule(
- "non-data dependents removed before descriptor",
- scgraph.Precedence,
- "dependent", "descriptor",
- func(from, to NodeVars) rel.Clauses {
- return rel.Clauses{
- from.TypeFilter(rulesVersionKey, Not(isDescriptor), Not(isData)),
- to.TypeFilter(rulesVersionKey, isDescriptor),
- JoinOnDescID(from, to, "desc-id"),
- StatusesToAbsent(from, scpb.Status_ABSENT, to, scpb.Status_ABSENT),
- }
- })
-}
-
-// These rules ensure we drop cross-descriptor constraints before dropping
-// descriptors, both the referencing and the referenced one. Namely,
-// 1. cross-descriptor constraints are absent before the referenced descriptor,
-// if the referencing table is not being dropped.
-// 2. cross-descriptor constraints are absent before the referencing descriptor,
-// if the referenced table is not being dropped.
-//
-// A canonical example is FKs:
-// To illustrate why rule 1 is necessary, suppose we have tables `t1` and `t2`,
-// and `t1` has a FK to `t2` (call this schema `S1`). The statement is
-// `DROP TABLE t2 CASCADE`. We will have to first transition the FK (dropped as
-// a result of CASCADE) to an intermediate state and then (in a separate
-// transaction) transition the table to the dropped state. Otherwise, if the FK
-// transitioned to absent in the same transaction as the table became dropped
-// (call this schema `S2`), it would be unsafe for `S1` and `S2` to exist in the
-// cluster at the same time, because inserts allowed under `S2` would violate `S1`.
-//
-// To illustrate why rule 2 is necessary, suppose we have tables `t1`, `t2`, `t3`,
-// and `t1` FKs to `t2` (call it `FK1`) and `t3` FKs to `t2` (call it `FK2`).
-// The statement is `DROP TABLE t1, t2 CASCADE`. Without rule 2, rule 1 alone will
-// ensure that `FK2` moves to an intermediate state first, and at the same stage,
-// `t1` will be dropped together with `FK1`. Validation will then fail because
-// `t2` will have an enforced FK constraint whose origin table (`t1`) is dropped.
-// It's worth noting that relaxing validation in this case is safe but we choose
-// not to do so because it requires other related changes and makes reasoning
-// harder.
-func init() {
- registerDepRule(
- "cross-descriptor constraint is absent before referenced descriptor is dropped",
- scgraph.Precedence,
- "cross-desc-constraint", "referenced-descriptor",
- func(from, to NodeVars) rel.Clauses {
- return rel.Clauses{
- from.TypeFilter(rulesVersionKey, isNonIndexBackedCrossDescriptorConstraint, isSubjectTo2VersionInvariant),
- to.TypeFilter(rulesVersionKey, isDescriptor),
- JoinReferencedDescID(from, to, "desc-id"),
- StatusesToAbsent(from, scpb.Status_ABSENT, to, scpb.Status_DROPPED),
- }
- },
- )
-
- registerDepRule(
- "cross-descriptor constraint is absent before referencing descriptor is dropped",
- scgraph.Precedence,
- "cross-desc-constraint", "referencing-descriptor",
- func(from, to NodeVars) rel.Clauses {
- return rel.Clauses{
- from.TypeFilter(rulesVersionKey, isNonIndexBackedCrossDescriptorConstraint, isSubjectTo2VersionInvariant),
- to.TypeFilter(rulesVersionKey, isDescriptor),
- JoinOnDescID(from, to, "desc-id"),
- StatusesToAbsent(from, scpb.Status_ABSENT, to, scpb.Status_DROPPED),
- }
- },
- )
-}
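
The S1/S2 argument above can be made concrete with a toy model: one node still enforces the FK (schema S1) while another has dropped both the FK and `t2` in the same transaction (schema S2). All names below are illustrative, not crdb code:

```go
package main

import "fmt"

// schema is a toy view of the world as one node sees it.
type schema struct {
	fkEnforced bool           // does t1.ref -> t2.id still hold?
	t2IDs      map[int64]bool // contents of t2; nil once t2 is dropped
}

// insertT1 applies the schema's view of the FK to an insert into t1.
func insertT1(s schema, ref int64) error {
	if s.fkEnforced && !s.t2IDs[ref] {
		return fmt.Errorf("FK violation: t2 has no row %d", ref)
	}
	return nil
}

func main() {
	s1 := schema{fkEnforced: true, t2IDs: map[int64]bool{1: true}}
	s2 := schema{fkEnforced: false} // FK and t2 dropped in the same txn

	// Under S2 the insert is allowed, but it violates S1, which may still
	// be in use elsewhere in the cluster -- hence the intermediate state.
	fmt.Println(insertT1(s2, 42)) // <nil>
	fmt.Println(insertT1(s1, 42)) // FK violation: t2 has no row 42
}
```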
diff --git a/pkg/sql/schemachanger/scplan/internal/rules/release_24_3/dep_garbage_collection.go b/pkg/sql/schemachanger/scplan/internal/rules/release_24_3/dep_garbage_collection.go
deleted file mode 100644
index 9fd4873bb3d3..000000000000
--- a/pkg/sql/schemachanger/scplan/internal/rules/release_24_3/dep_garbage_collection.go
+++ /dev/null
@@ -1,153 +0,0 @@
-// Copyright 2022 The Cockroach Authors.
-//
-// Use of this software is governed by the CockroachDB Software License
-// included in the /LICENSE file.
-
-package release_24_3
-
-import (
- "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/rel"
- "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scpb"
- . "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/internal/rules"
- "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/internal/scgraph"
- "github.com/cockroachdb/cockroach/pkg/sql/sem/catid"
-)
-
-// Rules related to garbage collection.
-// Garbage collection must occur:
-// - in the same stage as the descriptor disappears;
-// - for indexes, not before the index disappears;
-// - all in the same stage for each descriptor.
-func init() {
-
- registerDepRule(
- "table removed right before garbage collection",
- scgraph.SameStagePrecedence,
- "table", "data",
- func(from, to NodeVars) rel.Clauses {
- return rel.Clauses{
- from.TypeFilter(rulesVersionKey, isDescriptor),
- to.Type((*scpb.TableData)(nil)),
- JoinOnDescID(from, to, "table-id"),
- StatusesToAbsent(from, scpb.Status_ABSENT, to, scpb.Status_DROPPED),
- }
- },
- )
-
- registerDepRule(
- "descriptor removed right before garbage collection",
- scgraph.SameStagePrecedence,
- "database", "data",
- func(from, to NodeVars) rel.Clauses {
- return rel.Clauses{
- from.TypeFilter(rulesVersionKey, isDescriptor),
- to.Type((*scpb.DatabaseData)(nil)),
- JoinOnDescID(from, to, "db-id"),
- StatusesToAbsent(from, scpb.Status_ABSENT, to, scpb.Status_DROPPED),
- }
- },
- )
-
- registerDepRuleForDrop(
- "index removed before garbage collection",
- scgraph.Precedence,
- "index", "index-data",
- scpb.Status_ABSENT, scpb.Status_DROPPED,
- func(from, to NodeVars) rel.Clauses {
- return rel.Clauses{
- from.TypeFilter(rulesVersionKey, isIndex),
- to.Type((*scpb.IndexData)(nil)),
- JoinOnIndexID(from, to, "table-id", "index-id"),
- }
- },
- )
-
- dataIDs := func(data scpb.Element) (catid.DescID, catid.IndexID) {
- switch data := data.(type) {
- case *scpb.DatabaseData:
- return data.DatabaseID, 0
- case *scpb.TableData:
- return data.TableID, 0
- case *scpb.IndexData:
- return data.TableID, data.IndexID
- }
- return 0, 0
- }
-
- // GC jobs should all be scheduled in the same transaction.
- registerDepRuleForDrop(
- "schedule all GC jobs for a descriptor in the same stage",
- scgraph.SameStagePrecedence,
- "data-a", "data-b",
- scpb.Status_DROPPED, scpb.Status_DROPPED,
- func(from, to NodeVars) rel.Clauses {
- return rel.Clauses{
- from.TypeFilter(rulesVersionKey, isData),
- to.TypeFilter(rulesVersionKey, isData),
- JoinOnDescID(from, to, "desc-id"),
- FilterElements("SmallerIDsFirst", from, to, func(a, b scpb.Element) bool {
- aDescID, aIdxID := dataIDs(a)
- bDescID, bIdxID := dataIDs(b)
- if aDescID == bDescID {
- return aIdxID < bIdxID
- }
- return aDescID < bDescID
- }),
- }
- },
- )
-}
-
-// Rules to ensure proper garbage collection on rollbacks.
-// A GC job is required as soon as a new index receives data.
-func init() {
-
- registerDepRule(
- "index data exists as soon as index accepts backfills",
- scgraph.SameStagePrecedence,
- "index-name", "index",
- func(from, to NodeVars) rel.Clauses {
- return rel.Clauses{
- from.Type(
- (*scpb.PrimaryIndex)(nil),
- (*scpb.SecondaryIndex)(nil),
- ),
- to.Type((*scpb.IndexData)(nil)),
- JoinOnIndexID(from, to, "table-id", "index-id"),
- StatusesToPublicOrTransient(from, scpb.Status_BACKFILL_ONLY, to, scpb.Status_PUBLIC),
- }
- },
- )
-
- registerDepRule(
- "temp index data exists as soon as temp index accepts writes",
- scgraph.SameStagePrecedence,
- "temp-index", "temp-index-data",
- func(from, to NodeVars) rel.Clauses {
- return rel.Clauses{
- from.Type((*scpb.TemporaryIndex)(nil)),
- to.Type((*scpb.IndexData)(nil)),
- JoinOnIndexID(from, to, "table-id", "index-id"),
- StatusesToPublicOrTransient(from, scpb.Status_WRITE_ONLY, to, scpb.Status_PUBLIC),
- }
- },
- )
-}
-
-// Rules to ensure that, for newly created objects, the table data becomes
-// live only after the descriptor is public.
-func init() {
- registerDepRule(
- "table added right before data element",
- scgraph.Precedence,
- "table", "data",
- func(from, to NodeVars) rel.Clauses {
- return rel.Clauses{
- from.TypeFilter(rulesVersionKey, isDescriptor),
- to.TypeFilter(rulesVersionKey, isData),
- JoinOnDescID(from, to, "table-id"),
- StatusesToPublicOrTransient(from, scpb.Status_PUBLIC, to, scpb.Status_PUBLIC),
- }
- },
- )
-}
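
The SmallerIDsFirst comparator above induces a total order on a descriptor's data elements, which chains the same-stage edges together so all GC ops land in one stage. A minimal standalone sketch of that ordering (hypothetical dataID struct, not the scpb types):

```go
package main

import (
	"fmt"
	"sort"
)

// dataID is the (descriptor ID, index ID) pair that the dataIDs helper
// above extracts; the index ID is 0 for database and table data.
type dataID struct{ descID, indexID int64 }

func main() {
	ids := []dataID{{104, 2}, {103, 0}, {104, 1}, {104, 0}}
	// SmallerIDsFirst: order by descriptor ID, then by index ID.
	sort.Slice(ids, func(i, j int) bool {
		if ids[i].descID == ids[j].descID {
			return ids[i].indexID < ids[j].indexID
		}
		return ids[i].descID < ids[j].descID
	})
	fmt.Println(ids) // [{103 0} {104 0} {104 1} {104 2}]
}
```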
diff --git a/pkg/sql/schemachanger/scplan/internal/rules/release_24_3/dep_swap_index.go b/pkg/sql/schemachanger/scplan/internal/rules/release_24_3/dep_swap_index.go
deleted file mode 100644
index a72140d1845b..000000000000
--- a/pkg/sql/schemachanger/scplan/internal/rules/release_24_3/dep_swap_index.go
+++ /dev/null
@@ -1,146 +0,0 @@
-// Copyright 2022 The Cockroach Authors.
-//
-// Use of this software is governed by the CockroachDB Software License
-// included in the /LICENSE file.
-
-package release_24_3
-
-import (
- "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/rel"
- "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scpb"
- . "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/internal/rules"
- "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/internal/scgraph"
- "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/screl"
-)
-
-// This rule ensures that a new primary index becomes public right after the
-// old primary index starts getting removed, effectively swapping one for the
-// other. This rule also applies when the schema change gets reverted.
-func init() {
- registerDepRule(
- "primary index swap",
- scgraph.SameStagePrecedence,
- "old-index", "new-index",
- func(from, to NodeVars) rel.Clauses {
- return rel.Clauses{
- from.Type((*scpb.PrimaryIndex)(nil)),
- to.Type((*scpb.PrimaryIndex)(nil)),
- JoinOnDescID(from, to, "table-id"),
- JoinOn(
- from, screl.IndexID,
- to, screl.SourceIndexID,
- "old-index-id",
- ),
- from.TargetStatus(scpb.ToAbsent),
- from.CurrentStatus(scpb.Status_VALIDATED),
- to.TargetStatus(scpb.ToPublic, scpb.Transient),
- to.CurrentStatus(scpb.Status_PUBLIC),
- }
- },
- )
-
- registerDepRule(
- "primary index swap",
- scgraph.SameStagePrecedence,
- "old-index", "new-index",
- func(from, to NodeVars) rel.Clauses {
- return rel.Clauses{
- from.Type((*scpb.PrimaryIndex)(nil)),
- to.Type((*scpb.PrimaryIndex)(nil)),
- JoinOnDescID(from, to, "table-id"),
- JoinOn(
- from, screl.IndexID,
- to, screl.SourceIndexID,
- "old-index-id",
- ),
- from.TargetStatus(scpb.Transient),
- from.CurrentStatus(scpb.Status_TRANSIENT_VALIDATED),
- to.TargetStatus(scpb.ToPublic, scpb.Transient),
- to.CurrentStatus(scpb.Status_PUBLIC),
- }
- },
- )
-
- registerDepRule(
- "primary index swap",
- scgraph.SameStagePrecedence,
- "new-index", "old-index",
- func(from, to NodeVars) rel.Clauses {
- return rel.Clauses{
- from.Type((*scpb.PrimaryIndex)(nil)),
- to.Type((*scpb.PrimaryIndex)(nil)),
- JoinOnDescID(from, to, "table-id"),
- JoinOn(
- from, screl.SourceIndexID,
- to, screl.IndexID,
- "old-index-id",
- ),
- from.TargetStatus(scpb.ToAbsent),
- from.CurrentStatus(scpb.Status_VALIDATED),
- to.TargetStatus(scpb.ToPublic),
- to.CurrentStatus(scpb.Status_PUBLIC),
- }
- },
- )
-}
-
-// This rule ensures that when a transient primary index is involved in the
-// swap, the old index is gone before the new index is instated.
-func init() {
-
- registerDepRule(
- "old index absent before new index public when swapping with transient",
- scgraph.Precedence,
- "old-primary-index", "new-primary-index",
- func(from, to NodeVars) rel.Clauses {
- union := MkNodeVars("transient-primary-index")
- relationID := rel.Var("table-id")
- return rel.Clauses{
- from.Type((*scpb.PrimaryIndex)(nil)),
- union.Type((*scpb.PrimaryIndex)(nil)),
- to.Type((*scpb.PrimaryIndex)(nil)),
- JoinOnDescID(from, union, relationID),
- JoinOn(
- from, screl.IndexID,
- union, screl.SourceIndexID,
- "old-index-id",
- ),
- JoinOnDescID(union, to, relationID),
- JoinOn(
- union, screl.IndexID,
- to, screl.SourceIndexID,
- "transient-index-id",
- ),
- from.TargetStatus(scpb.ToAbsent),
- from.CurrentStatus(scpb.Status_ABSENT),
- to.TargetStatus(scpb.ToPublic),
- to.CurrentStatus(scpb.Status_PUBLIC),
- }
- },
- )
-}
-
-// This rule ensures that when secondary indexes are re-created after a primary
-// index key is changed, the secondary indexes are swapped in an atomic manner,
-// so that queries are not impacted by missing indexes.
-func init() {
-	// This is a strict version of the rule that will only work when a node
-	// is generating a plan on the latest master / 23.1. The StrictRecreate flag
-	// will be used to tag whether the existing secondary index was created on a
-	// new enough version.
- registerDepRule(
- "replacement secondary index should be validated before the old one becomes invisible",
- scgraph.Precedence,
- "new-index", "old-index",
- func(from, to NodeVars) rel.Clauses {
-			// Detect a potential secondary index recreation because of an ALTER
-			// PRIMARY KEY, and require that the new index be public
-			// before the old index can be hidden (i.e. they are swapped in
-			// an atomic manner).
- return append(IsPotentialSecondaryIndexSwap("index-id", "table-id"),
- from.CurrentStatus(scpb.Status_PUBLIC),
- to.CurrentStatus(scpb.Status_VALIDATED),
- )
- },
- )
-}
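
The swap rules above pair the old and new primary indexes through the new index's SourceIndexID. A small sketch of that join predicate, with an illustrative primaryIndex struct in place of the scpb element and screl attributes:

```go
package main

import "fmt"

// primaryIndex is an illustrative stand-in: the new primary index
// records the old one as its source.
type primaryIndex struct {
	tableID, indexID, sourceIndexID int64
}

// isSwapPair mirrors the JoinOnDescID plus
// JoinOn(from.IndexID, to.SourceIndexID) clauses above.
func isSwapPair(oldIdx, newIdx primaryIndex) bool {
	return oldIdx.tableID == newIdx.tableID &&
		newIdx.sourceIndexID == oldIdx.indexID
}

func main() {
	oldPK := primaryIndex{tableID: 104, indexID: 1}
	newPK := primaryIndex{tableID: 104, indexID: 3, sourceIndexID: 1}
	fmt.Println(isSwapPair(oldPK, newPK)) // true
}
```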
diff --git a/pkg/sql/schemachanger/scplan/internal/rules/release_24_3/dep_two_version.go b/pkg/sql/schemachanger/scplan/internal/rules/release_24_3/dep_two_version.go
deleted file mode 100644
index 49ad6be0f948..000000000000
--- a/pkg/sql/schemachanger/scplan/internal/rules/release_24_3/dep_two_version.go
+++ /dev/null
@@ -1,122 +0,0 @@
-// Copyright 2022 The Cockroach Authors.
-//
-// Use of this software is governed by the CockroachDB Software License
-// included in the /LICENSE file.
-
-package release_24_3
-
-import (
- "fmt"
- "reflect"
-
- "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/rel"
- "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scpb"
- "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/internal/opgen"
- . "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/internal/rules"
- "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/internal/scgraph"
- "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/screl"
-)
-
-// These rules ensure that changes to descriptor properties which must be
-// sequenced in order to safely enact online schema changes happen in
-// separate transactions.
-func init() {
-
- findNoopSourceStatuses := func(
- el scpb.Element, targetStatus scpb.TargetStatus,
- ) map[scpb.Status][]scpb.Status {
- // We want to skip the dependency edges if the status which got us into
- // the current status was a no-op. We track the no-op status parent nodes,
- // and we'll add a not-join to make sure there does not exist a node
- // with such a status when installing the rule.
- //
- // This is necessary to deal with cases like the transition from
- // BACKFILL_ONLY to its equivalent DELETE_ONLY in the rollback of an
- // ADD COLUMN. We don't want or need a dependency edge from DELETE_ONLY
- // to ABSENT in that case, but if we didn't check whether we got to
- // DELETE_ONLY from BACKFILL_ONLY, then we'd have one implicitly.
- statusMap := map[scpb.Status][]scpb.Status{}
- if err := opgen.IterateTransitions(el, targetStatus, func(
- t opgen.Transition,
- ) error {
- if !t.OpType().IsValid() {
- statusMap[t.To()] = append(statusMap[t.To()], t.From())
- }
- return nil
- }); err != nil {
- panic(err)
- }
- return statusMap
- }
- clausesForTwoVersionEdge := func(
- from, to NodeVars,
- el scpb.Element,
- targetStatus scpb.TargetStatus,
- t opgen.Transition,
- prePrevStatuses []scpb.Status,
- ) rel.Clauses {
- descriptorData := MkNodeVars("descriptor-data")
- var descID rel.Var = "descID"
- clauses := rel.Clauses{
- from.Type(el),
- to.Type(el),
- from.El.AttrEqVar(screl.DescID, descID),
- from.El.AttrEqVar(rel.Self, to.El),
- from.Target.AttrEqVar(rel.Self, to.Target),
- from.Target.AttrEq(screl.TargetStatus, targetStatus.Status()),
- from.Node.AttrEq(screl.CurrentStatus, t.From()),
- to.Node.AttrEq(screl.CurrentStatus, t.To()),
- descriptorIsNotBeingDropped(from.El),
- // Make sure to join a data element to confirm that data exists.
- descriptorData.Type((*scpb.TableData)(nil)),
- descriptorData.JoinTargetNode(),
- descriptorData.CurrentStatus(scpb.Status_PUBLIC),
- descriptorData.DescIDEq(descID),
- descriptorDataIsNotBeingAdded(descID),
- }
- if len(prePrevStatuses) > 0 {
- clauses = append(clauses,
- GetNotJoinOnNodeWithStatusIn(prePrevStatuses)(from.Target),
- )
- }
- return clauses
- }
- addRules := func(el scpb.Element, targetStatus scpb.TargetStatus) {
- statusMap := findNoopSourceStatuses(el, targetStatus)
- if err := opgen.IterateTransitions(el, targetStatus, func(
- t opgen.Transition,
- ) error {
- elemName := reflect.TypeOf(el).Elem().Name()
- ruleName := scgraph.RuleName(fmt.Sprintf(
- "%s transitions to %s uphold 2-version invariant: %s->%s",
- elemName, targetStatus.Status(), t.From(), t.To(),
- ))
- registerDepRule(
- ruleName,
- scgraph.PreviousTransactionPrecedence,
- "prev", "next",
- func(from, to NodeVars) rel.Clauses {
- return clausesForTwoVersionEdge(
- from, to, el, targetStatus, t, statusMap[t.From()],
- )
- },
- )
- return nil
- }); err != nil {
- panic(err)
- }
- }
- _ = scpb.ForEachElementType(func(el scpb.Element) error {
- if !isSubjectTo2VersionInvariant(el) {
- return nil
- }
- if opgen.HasPublic(el) {
- addRules(el, scpb.ToPublic)
- }
- if opgen.HasTransient(el) {
- addRules(el, scpb.Transient)
- }
- addRules(el, scpb.ToAbsent) // every element has ToAbsent
- return nil
- })
-}
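
The rule names generated here are exactly what appears in the deprules testdata further down. A quick sketch of the formatting, using one transition as an example:

```go
package main

import "fmt"

func main() {
	// Mirrors the fmt.Sprintf above; the resulting string matches entries
	// like the CheckConstraint rules in the deprules testdata below.
	elemName, target, from, to := "CheckConstraint", "ABSENT", "PUBLIC", "VALIDATED"
	ruleName := fmt.Sprintf(
		"%s transitions to %s uphold 2-version invariant: %s->%s",
		elemName, target, from, to,
	)
	fmt.Println(ruleName)
	// Output: CheckConstraint transitions to ABSENT uphold 2-version invariant: PUBLIC->VALIDATED
}
```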
diff --git a/pkg/sql/schemachanger/scplan/internal/rules/release_24_3/helpers.go b/pkg/sql/schemachanger/scplan/internal/rules/release_24_3/helpers.go
deleted file mode 100644
index e22a22ed78f5..000000000000
--- a/pkg/sql/schemachanger/scplan/internal/rules/release_24_3/helpers.go
+++ /dev/null
@@ -1,327 +0,0 @@
-// Copyright 2023 The Cockroach Authors.
-//
-// Use of this software is governed by the CockroachDB Software License
-// included in the /LICENSE file.
-
-package release_24_3
-
-import (
- "github.com/cockroachdb/cockroach/pkg/clusterversion"
- "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/rel"
- "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scpb"
- "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/internal/rules"
- "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/screl"
- "github.com/cockroachdb/errors"
-)
-
-const (
-	// rulesVersion is the version suffix appended to rel rule names.
- rulesVersion = "-24.3"
-)
-
-// rulesVersionKey is the cluster version associated with this rule set.
-var rulesVersionKey = clusterversion.V24_3
-
-// descriptorIsNotBeingDropped creates a clause which leads to the outer clause
-// failing to unify if the passed element is part of a descriptor and
-// that descriptor is being dropped.
-var descriptorIsNotBeingDropped = screl.Schema.DefNotJoin1(
- "descriptorIsNotBeingDropped"+rulesVersion, "element", func(
- element rel.Var,
- ) rel.Clauses {
- descriptor := rules.MkNodeVars("descriptor")
- return rel.Clauses{
- descriptor.TypeFilter(rulesVersionKey, isDescriptor),
- descriptor.JoinTarget(),
- rules.JoinOnDescIDUntyped(descriptor.El, element, "id"),
- descriptor.TargetStatus(scpb.ToAbsent),
- }
- },
-)
-
-// descriptorDataIsNotBeingAdded indicates whether we are operating on a
-// descriptor that already exists and was not created in the current
-// transaction. This is determined by detecting that the data element is
-// public and is not going from absent to public, as it would be for a newly
-// created descriptor.
-var descriptorDataIsNotBeingAdded = screl.Schema.DefNotJoin1(
- "descriptorIsDataNotBeingAdded"+rulesVersion, "descID", func(
- descID rel.Var,
- ) rel.Clauses {
- descriptorData := rules.MkNodeVars("descriptor-data")
- prevDescriptorData := rules.MkNodeVars("prev-descriptor-data")
- return rel.Clauses{
- descriptorData.Type((*scpb.TableData)(nil)),
- descriptorData.JoinTargetNode(),
- descriptorData.CurrentStatus(scpb.Status_PUBLIC),
- descriptorData.DescIDEq(descID),
- prevDescriptorData.Type((*scpb.TableData)(nil)),
- prevDescriptorData.JoinTargetNode(),
- prevDescriptorData.CurrentStatus(scpb.Status_ABSENT),
- prevDescriptorData.DescIDEq(descID),
- prevDescriptorData.El.AttrEqVar(rel.Self, descriptorData.El),
- }
- },
-)
-
-// isDescriptor returns true for a descriptor-element, i.e. an element which
-// owns its corresponding descriptor.
-func isDescriptor(e scpb.Element) bool {
- switch e.(type) {
- case *scpb.Database, *scpb.Schema, *scpb.Table, *scpb.View, *scpb.Sequence,
- *scpb.AliasType, *scpb.EnumType, *scpb.CompositeType, *scpb.Function:
- return true
- }
- return false
-}
-
-// IsDescriptor returns true for a descriptor-element, i.e. an element which
-// owns its corresponding descriptor. This is only used for exports.
-func IsDescriptor(e scpb.Element) bool {
- return isDescriptor(e)
-}
-
-func isSubjectTo2VersionInvariant(e scpb.Element) bool {
- // TODO(ajwerner): This should include constraints and enum values but it
- // currently does not because we do not support dropping them unless we're
- // dropping the descriptor and we do not support adding them.
- if isIndex(e) || isColumn(e) {
- return true
- }
- switch e.(type) {
- case *scpb.CheckConstraint, *scpb.UniqueWithoutIndexConstraint, *scpb.ForeignKeyConstraint,
- *scpb.ColumnNotNull:
- return true
- }
- return false
-}
-
-func isIndex(e scpb.Element) bool {
- switch e.(type) {
- case *scpb.PrimaryIndex, *scpb.SecondaryIndex, *scpb.TemporaryIndex:
- return true
- }
- return false
-}
-
-func isIndexColumn(e scpb.Element) bool {
- switch e.(type) {
- case *scpb.IndexColumn:
- return true
- }
- return false
-}
-
-func isColumn(e scpb.Element) bool {
- _, ok := e.(*scpb.Column)
- return ok
-}
-
-func isSimpleDependent(e scpb.Element) bool {
- return !isDescriptor(e) && !isSubjectTo2VersionInvariant(e) && !isData(e)
-}
-
-func getTypeT(element scpb.Element) (*scpb.TypeT, error) {
- switch e := element.(type) {
- case *scpb.ColumnType:
- if e == nil {
- return nil, nil
- }
- return &e.TypeT, nil
- case *scpb.AliasType:
- if e == nil {
- return nil, nil
- }
- return &e.TypeT, nil
- }
- return nil, errors.AssertionFailedf("element %T does not have an embedded scpb.TypeT", element)
-}
-
-func isWithTypeT(element scpb.Element) bool {
- _, err := getTypeT(element)
- return err == nil
-}
-
-func getExpression(element scpb.Element) (*scpb.Expression, error) {
- switch e := element.(type) {
- case *scpb.ColumnType:
- if e == nil {
- return nil, nil
- }
- return e.ComputeExpr, nil
- case *scpb.ColumnComputeExpression:
- if e == nil {
- return nil, nil
- }
- return &e.Expression, nil
- case *scpb.ColumnDefaultExpression:
- if e == nil {
- return nil, nil
- }
- return &e.Expression, nil
- case *scpb.ColumnOnUpdateExpression:
- if e == nil {
- return nil, nil
- }
- return &e.Expression, nil
- case *scpb.SecondaryIndex:
- if e == nil || e.EmbeddedExpr == nil {
- return nil, nil
- }
- return e.EmbeddedExpr, nil
- case *scpb.SecondaryIndexPartial:
- if e == nil {
- return nil, nil
- }
- return &e.Expression, nil
- case *scpb.CheckConstraint:
- if e == nil {
- return nil, nil
- }
- return &e.Expression, nil
- case *scpb.CheckConstraintUnvalidated:
- if e == nil {
- return nil, nil
- }
- return &e.Expression, nil
- }
- return nil, errors.AssertionFailedf("element %T does not have an embedded scpb.Expression", element)
-}
-
-func isWithExpression(element scpb.Element) bool {
- _, err := getExpression(element)
- return err == nil
-}
-
-func isTypeDescriptor(element scpb.Element) bool {
- switch element.(type) {
- case *scpb.EnumType, *scpb.AliasType, *scpb.CompositeType:
- return true
- default:
- return false
- }
-}
-
-func isColumnDependent(e scpb.Element) bool {
- switch e.(type) {
- case *scpb.ColumnType, *scpb.ColumnNotNull:
- return true
- case *scpb.ColumnName, *scpb.ColumnComment, *scpb.IndexColumn:
- return true
- }
- return isColumnTypeDependent(e)
-}
-
-func isColumnNotNull(e scpb.Element) bool {
- switch e.(type) {
- case *scpb.ColumnNotNull:
- return true
- }
- return false
-}
-func isColumnTypeDependent(e scpb.Element) bool {
- switch e.(type) {
- case *scpb.SequenceOwner, *scpb.ColumnDefaultExpression, *scpb.ColumnOnUpdateExpression, *scpb.ColumnComputeExpression:
- return true
- }
- return false
-}
-
-func isIndexDependent(e scpb.Element) bool {
- switch e.(type) {
- case *scpb.IndexName, *scpb.IndexComment, *scpb.IndexColumn,
- *scpb.IndexZoneConfig:
- return true
- case *scpb.IndexPartitioning, *scpb.PartitionZoneConfig, *scpb.SecondaryIndexPartial:
- return true
- }
- return false
-}
-
-// CRDB supports five kinds of constraints in two categories:
-// - PK, Unique (index-backed)
-// - Check, UniqueWithoutIndex, FK (non-index-backed)
-func isConstraint(e scpb.Element) bool {
- return isIndex(e) || isNonIndexBackedConstraint(e)
-}
-
-// isNonIndexBackedConstraint returns true if `e` is a non-index-backed constraint.
-func isNonIndexBackedConstraint(e scpb.Element) bool {
- switch e.(type) {
- case *scpb.CheckConstraint, *scpb.UniqueWithoutIndexConstraint, *scpb.ForeignKeyConstraint,
- *scpb.ColumnNotNull:
- return true
- case *scpb.CheckConstraintUnvalidated, *scpb.UniqueWithoutIndexConstraintUnvalidated,
- *scpb.ForeignKeyConstraintUnvalidated:
- return true
- }
- return false
-}
-
-// isNonIndexBackedCrossDescriptorConstraint returns true if `e` is a
-// non-index-backed constraint and it can potentially reference another
-// descriptor.
-//
-// This filter exists because in general we need to drop the constraint first
-// before dropping referencing/referenced descriptor. Read rules that use
-// this filter for more details.
-//
-// TODO (xiang): UniqueWithoutIndex and UniqueWithoutIndexNotValid should
-// also be treated as cross-descriptor constraints because their partial
-// predicates can reference other descriptors.
-func isNonIndexBackedCrossDescriptorConstraint(e scpb.Element) bool {
- switch e.(type) {
- case *scpb.CheckConstraint, *scpb.UniqueWithoutIndexConstraint,
- *scpb.ForeignKeyConstraint:
- return true
- case *scpb.CheckConstraintUnvalidated, *scpb.UniqueWithoutIndexConstraintUnvalidated,
- *scpb.ForeignKeyConstraintUnvalidated:
- return true
- }
- return false
-}
-
-func isConstraintDependent(e scpb.Element) bool {
- switch e.(type) {
- case *scpb.ConstraintWithoutIndexName:
- return true
- case *scpb.ConstraintComment:
- return true
- }
- return false
-}
-
-func isConstraintWithoutIndexName(e scpb.Element) bool {
- switch e.(type) {
- case *scpb.ConstraintWithoutIndexName:
- return true
- }
- return false
-}
-
-func isData(e scpb.Element) bool {
- switch e.(type) {
- case *scpb.DatabaseData:
- return true
- case *scpb.TableData:
- return true
- case *scpb.IndexData:
- return true
- }
- return false
-}
-
-func isDescriptorParentReference(e scpb.Element) bool {
- switch e.(type) {
- case *scpb.SchemaChild, *scpb.SchemaParent:
- return true
- }
- return false
-}
-
-func isOwner(e scpb.Element) bool {
- switch e.(type) {
- case *scpb.Owner:
- return true
- }
- return false
-}
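
The helpers above all follow one pattern: classify elements with type switches, then compose the simple predicates into compound ones such as isSimpleDependent. A toy, self-contained version of that pattern (illustrative types; the real predicate also consults the 2-version invariant):

```go
package main

import "fmt"

// element stands in for scpb.Element in this sketch.
type element interface{ elem() }

type table struct{}      // a descriptor-owning element
type indexData struct{}  // a data element
type columnName struct{} // a simple dependent element

func (table) elem()      {}
func (indexData) elem()  {}
func (columnName) elem() {}

// Simple predicates, classified by type assertion as in helpers.go.
func isDescriptor(e element) bool { _, ok := e.(table); return ok }
func isData(e element) bool       { _, ok := e.(indexData); return ok }

// isSimpleDependent mirrors the real helper's composition: everything
// that is neither a descriptor nor data (2-version check omitted here).
func isSimpleDependent(e element) bool { return !isDescriptor(e) && !isData(e) }

func main() {
	fmt.Println(isSimpleDependent(columnName{})) // true
	fmt.Println(isSimpleDependent(table{}))      // false
}
```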
diff --git a/pkg/sql/schemachanger/scplan/internal/rules/release_24_3/registry.go b/pkg/sql/schemachanger/scplan/internal/rules/release_24_3/registry.go
deleted file mode 100644
index 0bc267784bda..000000000000
--- a/pkg/sql/schemachanger/scplan/internal/rules/release_24_3/registry.go
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright 2023 The Cockroach Authors.
-//
-// Use of this software is governed by the CockroachDB Software License
-// included in the /LICENSE file.
-
-package release_24_3
-
-import (
- "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/rel"
- "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scpb"
- . "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/internal/rules"
- "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/internal/scgraph"
-)
-
-var registry = NewRegistry()
-
-func registerDepRule(
- ruleName scgraph.RuleName,
- kind scgraph.DepEdgeKind,
- fromEl, toEl string,
- def func(from, to NodeVars) rel.Clauses,
-) {
- registry.RegisterDepRule(ruleName,
- kind,
- fromEl, toEl,
- def)
-}
-
-func registerDepRuleForDrop(
- ruleName scgraph.RuleName,
- kind scgraph.DepEdgeKind,
- from, to string,
- fromStatus, toStatus scpb.Status,
- fn func(from, to NodeVars) rel.Clauses,
-) {
- RegisterDepRuleForDrop(registry,
- ruleName,
- kind,
- from, to,
- fromStatus, toStatus,
- fn)
-}
-
-// GetRegistry returns the registry for this cockroach release.
-func GetRegistry() *Registry {
- return registry
-}
diff --git a/pkg/sql/schemachanger/scplan/internal/rules/release_24_3/rules_test.go b/pkg/sql/schemachanger/scplan/internal/rules/release_24_3/rules_test.go
deleted file mode 100644
index 408fcb9cce9a..000000000000
--- a/pkg/sql/schemachanger/scplan/internal/rules/release_24_3/rules_test.go
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2021 The Cockroach Authors.
-//
-// Use of this software is governed by the CockroachDB Software License
-// included in the /LICENSE file.
-
-package release_24_3
-
-import (
- "sort"
- "testing"
-
- "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/rel"
- "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/screl"
- "github.com/cockroachdb/cockroach/pkg/testutils/datapathutils"
- "github.com/cockroachdb/datadriven"
- "github.com/stretchr/testify/require"
- "gopkg.in/yaml.v3"
-)
-
-// TestRulesYAML outputs the rules to yaml as a way to visualize changes.
-// Rules are sorted by name to ensure stable output.
-func TestRulesYAML(t *testing.T) {
- datadriven.Walk(t, datapathutils.TestDataPath(t), func(t *testing.T, path string) {
- datadriven.RunTest(t, path, func(t *testing.T, d *datadriven.TestData) string {
- switch d.Cmd {
- case "rules":
- var m yaml.Node
- m.Kind = yaml.MappingNode
- var s []rel.RuleDef
- screl.Schema.ForEachRule(func(def rel.RuleDef) {
- s = append(s, def)
- })
- sort.SliceStable(s, func(i, j int) bool {
- return s[i].Name < s[j].Name
- })
- for _, def := range s {
- var rule yaml.Node
- if err := rule.Encode(def); err != nil {
- panic(err)
- }
- m.Content = append(m.Content, rule.Content...)
- }
- out, err := yaml.Marshal(m)
- if err != nil {
- d.Fatalf(t, "failed to marshal rules: %v", err)
- }
- return string(out)
- case "deprules":
- out, err := registry.MarshalDepRules()
- require.NoError(t, err)
- return out
- }
-			d.Fatalf(t, "deprules and rules are the only commands, got %s", d.Cmd)
- return ""
- })
- })
-}
diff --git a/pkg/sql/schemachanger/scplan/internal/rules/release_24_3/testdata/deprules b/pkg/sql/schemachanger/scplan/internal/rules/release_24_3/testdata/deprules
deleted file mode 100644
index bfcbdf09838c..000000000000
--- a/pkg/sql/schemachanger/scplan/internal/rules/release_24_3/testdata/deprules
+++ /dev/null
@@ -1,8751 +0,0 @@
-deprules
-----
-- name: 'CheckConstraint transitions to ABSENT uphold 2-version invariant: PUBLIC->VALIDATED'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.CheckConstraint'
- - $next[Type] = '*scpb.CheckConstraint'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = PUBLIC
- - $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'CheckConstraint transitions to ABSENT uphold 2-version invariant: TRANSIENT_ABSENT->ABSENT'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.CheckConstraint'
- - $next[Type] = '*scpb.CheckConstraint'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = TRANSIENT_ABSENT
- - $next-Node[CurrentStatus] = ABSENT
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'CheckConstraint transitions to ABSENT uphold 2-version invariant: TRANSIENT_VALIDATED->VALIDATED'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.CheckConstraint'
- - $next[Type] = '*scpb.CheckConstraint'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = TRANSIENT_VALIDATED
- - $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'CheckConstraint transitions to ABSENT uphold 2-version invariant: TRANSIENT_WRITE_ONLY->VALIDATED'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.CheckConstraint'
- - $next[Type] = '*scpb.CheckConstraint'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = TRANSIENT_WRITE_ONLY
- - $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'CheckConstraint transitions to ABSENT uphold 2-version invariant: VALIDATED->ABSENT'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.CheckConstraint'
- - $next[Type] = '*scpb.CheckConstraint'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = VALIDATED
- - $next-Node[CurrentStatus] = ABSENT
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - nodeNotExistsWithStatusIn_TRANSIENT_VALIDATED_WRITE_ONLY_TRANSIENT_WRITE_ONLY($prev-Target)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'CheckConstraint transitions to ABSENT uphold 2-version invariant: WRITE_ONLY->VALIDATED'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.CheckConstraint'
- - $next[Type] = '*scpb.CheckConstraint'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = WRITE_ONLY
- - $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'CheckConstraint transitions to PUBLIC uphold 2-version invariant: ABSENT->WRITE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.CheckConstraint'
- - $next[Type] = '*scpb.CheckConstraint'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = PUBLIC
- - $prev-Node[CurrentStatus] = ABSENT
- - $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'CheckConstraint transitions to PUBLIC uphold 2-version invariant: VALIDATED->PUBLIC'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.CheckConstraint'
- - $next[Type] = '*scpb.CheckConstraint'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = PUBLIC
- - $prev-Node[CurrentStatus] = VALIDATED
- - $next-Node[CurrentStatus] = PUBLIC
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'CheckConstraint transitions to PUBLIC uphold 2-version invariant: WRITE_ONLY->VALIDATED'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.CheckConstraint'
- - $next[Type] = '*scpb.CheckConstraint'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = PUBLIC
- - $prev-Node[CurrentStatus] = WRITE_ONLY
- - $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'CheckConstraint transitions to TRANSIENT_ABSENT uphold 2-version invariant: ABSENT->WRITE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.CheckConstraint'
- - $next[Type] = '*scpb.CheckConstraint'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- - $prev-Node[CurrentStatus] = ABSENT
- - $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'CheckConstraint transitions to TRANSIENT_ABSENT uphold 2-version invariant: PUBLIC->TRANSIENT_VALIDATED'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.CheckConstraint'
- - $next[Type] = '*scpb.CheckConstraint'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- - $prev-Node[CurrentStatus] = PUBLIC
- - $next-Node[CurrentStatus] = TRANSIENT_VALIDATED
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'CheckConstraint transitions to TRANSIENT_ABSENT uphold 2-version invariant: TRANSIENT_VALIDATED->TRANSIENT_ABSENT'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.CheckConstraint'
- - $next[Type] = '*scpb.CheckConstraint'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- - $prev-Node[CurrentStatus] = TRANSIENT_VALIDATED
- - $next-Node[CurrentStatus] = TRANSIENT_ABSENT
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - nodeNotExistsWithStatusIn_TRANSIENT_WRITE_ONLY($prev-Target)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'CheckConstraint transitions to TRANSIENT_ABSENT uphold 2-version invariant: TRANSIENT_WRITE_ONLY->TRANSIENT_VALIDATED'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.CheckConstraint'
- - $next[Type] = '*scpb.CheckConstraint'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- - $prev-Node[CurrentStatus] = TRANSIENT_WRITE_ONLY
- - $next-Node[CurrentStatus] = TRANSIENT_VALIDATED
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'CheckConstraint transitions to TRANSIENT_ABSENT uphold 2-version invariant: VALIDATED->PUBLIC'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.CheckConstraint'
- - $next[Type] = '*scpb.CheckConstraint'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- - $prev-Node[CurrentStatus] = VALIDATED
- - $next-Node[CurrentStatus] = PUBLIC
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'CheckConstraint transitions to TRANSIENT_ABSENT uphold 2-version invariant: WRITE_ONLY->VALIDATED'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.CheckConstraint'
- - $next[Type] = '*scpb.CheckConstraint'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- - $prev-Node[CurrentStatus] = WRITE_ONLY
- - $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'Column transitions to ABSENT uphold 2-version invariant: DELETE_ONLY->ABSENT'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.Column'
- - $next[Type] = '*scpb.Column'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = DELETE_ONLY
- - $next-Node[CurrentStatus] = ABSENT
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'Column transitions to ABSENT uphold 2-version invariant: PUBLIC->WRITE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.Column'
- - $next[Type] = '*scpb.Column'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = PUBLIC
- - $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'Column transitions to ABSENT uphold 2-version invariant: WRITE_ONLY->DELETE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.Column'
- - $next[Type] = '*scpb.Column'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = WRITE_ONLY
- - $next-Node[CurrentStatus] = DELETE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'Column transitions to PUBLIC uphold 2-version invariant: ABSENT->DELETE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.Column'
- - $next[Type] = '*scpb.Column'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = PUBLIC
- - $prev-Node[CurrentStatus] = ABSENT
- - $next-Node[CurrentStatus] = DELETE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'Column transitions to PUBLIC uphold 2-version invariant: DELETE_ONLY->WRITE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.Column'
- - $next[Type] = '*scpb.Column'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = PUBLIC
- - $prev-Node[CurrentStatus] = DELETE_ONLY
- - $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'Column transitions to PUBLIC uphold 2-version invariant: WRITE_ONLY->PUBLIC'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.Column'
- - $next[Type] = '*scpb.Column'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = PUBLIC
- - $prev-Node[CurrentStatus] = WRITE_ONLY
- - $next-Node[CurrentStatus] = PUBLIC
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'ColumnNotNull transitions to ABSENT uphold 2-version invariant: PUBLIC->VALIDATED'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.ColumnNotNull'
- - $next[Type] = '*scpb.ColumnNotNull'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = PUBLIC
- - $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'ColumnNotNull transitions to ABSENT uphold 2-version invariant: VALIDATED->ABSENT'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.ColumnNotNull'
- - $next[Type] = '*scpb.ColumnNotNull'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = VALIDATED
- - $next-Node[CurrentStatus] = ABSENT
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - nodeNotExistsWithStatusIn_WRITE_ONLY($prev-Target)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'ColumnNotNull transitions to ABSENT uphold 2-version invariant: WRITE_ONLY->VALIDATED'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.ColumnNotNull'
- - $next[Type] = '*scpb.ColumnNotNull'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = WRITE_ONLY
- - $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'ColumnNotNull transitions to PUBLIC uphold 2-version invariant: ABSENT->WRITE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.ColumnNotNull'
- - $next[Type] = '*scpb.ColumnNotNull'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = PUBLIC
- - $prev-Node[CurrentStatus] = ABSENT
- - $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'ColumnNotNull transitions to PUBLIC uphold 2-version invariant: VALIDATED->PUBLIC'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.ColumnNotNull'
- - $next[Type] = '*scpb.ColumnNotNull'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = PUBLIC
- - $prev-Node[CurrentStatus] = VALIDATED
- - $next-Node[CurrentStatus] = PUBLIC
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'ColumnNotNull transitions to PUBLIC uphold 2-version invariant: WRITE_ONLY->VALIDATED'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.ColumnNotNull'
- - $next[Type] = '*scpb.ColumnNotNull'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = PUBLIC
- - $prev-Node[CurrentStatus] = WRITE_ONLY
- - $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: Computed column expression is dropped before the column it depends on
- from: column-expr-Node
- kind: Precedence
- to: column-Node
- query:
- - $column-expr[Type] = '*scpb.ColumnComputeExpression'
- - $column[Type] = '*scpb.Column'
- - joinOnDescID($column-expr, $column, $table-id)
- - computedColumnTypeReferencesColumn(*scpb.ColumnComputeExpression, *scpb.Column)($column-expr, $column)
- - toAbsent($column-expr-Target, $column-Target)
- - $column-expr-Node[CurrentStatus] = ABSENT
- - $column-Node[CurrentStatus] = WRITE_ONLY
- - joinTargetNode($column-expr, $column-expr-Target, $column-expr-Node)
- - joinTargetNode($column, $column-Target, $column-Node)
-- name: Computed column expression is dropped before the column it depends on
- from: column-expr-Node
- kind: Precedence
- to: column-Node
- query:
- - $column-expr[Type] = '*scpb.ColumnComputeExpression'
- - $column[Type] = '*scpb.Column'
- - joinOnDescID($column-expr, $column, $table-id)
- - computedColumnTypeReferencesColumn(*scpb.ColumnComputeExpression, *scpb.Column)($column-expr, $column)
- - transient($column-expr-Target, $column-Target)
- - $column-expr-Node[CurrentStatus] = TRANSIENT_ABSENT
- - $column-Node[CurrentStatus] = TRANSIENT_WRITE_ONLY
- - joinTargetNode($column-expr, $column-expr-Target, $column-expr-Node)
- - joinTargetNode($column, $column-Target, $column-Node)
-- name: Computed column expression is dropped before the column it depends on
- from: column-expr-Node
- kind: Precedence
- to: column-Node
- query:
- - $column-expr[Type] = '*scpb.ColumnComputeExpression'
- - $column[Type] = '*scpb.Column'
- - joinOnDescID($column-expr, $column, $table-id)
- - computedColumnTypeReferencesColumn(*scpb.ColumnComputeExpression, *scpb.Column)($column-expr, $column)
- - $column-expr-Target[TargetStatus] = TRANSIENT_ABSENT
- - $column-expr-Node[CurrentStatus] = TRANSIENT_ABSENT
- - $column-Target[TargetStatus] = ABSENT
- - $column-Node[CurrentStatus] = WRITE_ONLY
- - joinTargetNode($column-expr, $column-expr-Target, $column-expr-Node)
- - joinTargetNode($column, $column-Target, $column-Node)
-- name: Computed column expression is dropped before the column it depends on
- from: column-expr-Node
- kind: Precedence
- to: column-Node
- query:
- - $column-expr[Type] = '*scpb.ColumnComputeExpression'
- - $column[Type] = '*scpb.Column'
- - joinOnDescID($column-expr, $column, $table-id)
- - computedColumnTypeReferencesColumn(*scpb.ColumnComputeExpression, *scpb.Column)($column-expr, $column)
- - $column-expr-Target[TargetStatus] = ABSENT
- - $column-expr-Node[CurrentStatus] = ABSENT
- - $column-Target[TargetStatus] = TRANSIENT_ABSENT
- - $column-Node[CurrentStatus] = TRANSIENT_WRITE_ONLY
- - joinTargetNode($column-expr, $column-expr-Target, $column-expr-Node)
- - joinTargetNode($column, $column-Target, $column-Node)
-- name: Constraint should be hidden before name
- from: constraint-name-Node
- kind: Precedence
- to: constraint-Node
- query:
- - $constraint-name[Type] = '*scpb.ConstraintWithoutIndexName'
- - $constraint[Type] IN ['*scpb.CheckConstraint', '*scpb.CheckConstraintUnvalidated', '*scpb.ColumnNotNull', '*scpb.ForeignKeyConstraint', '*scpb.ForeignKeyConstraintUnvalidated', '*scpb.UniqueWithoutIndexConstraint', '*scpb.UniqueWithoutIndexConstraintUnvalidated']
- - joinOnConstraintID($constraint-name, $constraint, $table-id, $constraint-id)
- - toAbsent($constraint-name-Target, $constraint-Target)
- - $constraint-name-Node[CurrentStatus] = ABSENT
- - $constraint-Node[CurrentStatus] = ABSENT
- - joinTargetNode($constraint-name, $constraint-name-Target, $constraint-name-Node)
- - joinTargetNode($constraint, $constraint-Target, $constraint-Node)
-- name: Constraint should be hidden before name
- from: constraint-name-Node
- kind: Precedence
- to: constraint-Node
- query:
- - $constraint-name[Type] = '*scpb.ConstraintWithoutIndexName'
- - $constraint[Type] IN ['*scpb.CheckConstraint', '*scpb.CheckConstraintUnvalidated', '*scpb.ColumnNotNull', '*scpb.ForeignKeyConstraint', '*scpb.ForeignKeyConstraintUnvalidated', '*scpb.UniqueWithoutIndexConstraint', '*scpb.UniqueWithoutIndexConstraintUnvalidated']
- - joinOnConstraintID($constraint-name, $constraint, $table-id, $constraint-id)
- - transient($constraint-name-Target, $constraint-Target)
- - $constraint-name-Node[CurrentStatus] = TRANSIENT_ABSENT
- - $constraint-Node[CurrentStatus] = TRANSIENT_ABSENT
- - joinTargetNode($constraint-name, $constraint-name-Target, $constraint-name-Node)
- - joinTargetNode($constraint, $constraint-Target, $constraint-Node)
-- name: Constraint should be hidden before name
- from: constraint-name-Node
- kind: Precedence
- to: constraint-Node
- query:
- - $constraint-name[Type] = '*scpb.ConstraintWithoutIndexName'
- - $constraint[Type] IN ['*scpb.CheckConstraint', '*scpb.CheckConstraintUnvalidated', '*scpb.ColumnNotNull', '*scpb.ForeignKeyConstraint', '*scpb.ForeignKeyConstraintUnvalidated', '*scpb.UniqueWithoutIndexConstraint', '*scpb.UniqueWithoutIndexConstraintUnvalidated']
- - joinOnConstraintID($constraint-name, $constraint, $table-id, $constraint-id)
- - $constraint-name-Target[TargetStatus] = TRANSIENT_ABSENT
- - $constraint-name-Node[CurrentStatus] = TRANSIENT_ABSENT
- - $constraint-Target[TargetStatus] = ABSENT
- - $constraint-Node[CurrentStatus] = ABSENT
- - joinTargetNode($constraint-name, $constraint-name-Target, $constraint-name-Node)
- - joinTargetNode($constraint, $constraint-Target, $constraint-Node)
-- name: Constraint should be hidden before name
- from: constraint-name-Node
- kind: Precedence
- to: constraint-Node
- query:
- - $constraint-name[Type] = '*scpb.ConstraintWithoutIndexName'
- - $constraint[Type] IN ['*scpb.CheckConstraint', '*scpb.CheckConstraintUnvalidated', '*scpb.ColumnNotNull', '*scpb.ForeignKeyConstraint', '*scpb.ForeignKeyConstraintUnvalidated', '*scpb.UniqueWithoutIndexConstraint', '*scpb.UniqueWithoutIndexConstraintUnvalidated']
- - joinOnConstraintID($constraint-name, $constraint, $table-id, $constraint-id)
- - $constraint-name-Target[TargetStatus] = ABSENT
- - $constraint-name-Node[CurrentStatus] = ABSENT
- - $constraint-Target[TargetStatus] = TRANSIENT_ABSENT
- - $constraint-Node[CurrentStatus] = TRANSIENT_ABSENT
- - joinTargetNode($constraint-name, $constraint-name-Target, $constraint-name-Node)
- - joinTargetNode($constraint, $constraint-Target, $constraint-Node)
-- name: Constraint should be hidden before name
- from: constraint-Node
- kind: Precedence
- to: constraint-name-Node
- query:
- - $constraint[Type] IN ['*scpb.CheckConstraint', '*scpb.CheckConstraintUnvalidated', '*scpb.ColumnNotNull', '*scpb.ForeignKeyConstraint', '*scpb.ForeignKeyConstraintUnvalidated', '*scpb.UniqueWithoutIndexConstraint', '*scpb.UniqueWithoutIndexConstraintUnvalidated']
- - $constraint-name[Type] = '*scpb.ConstraintWithoutIndexName'
- - joinOnConstraintID($constraint, $constraint-name, $table-id, $constraint-id)
- - toAbsent($constraint-Target, $constraint-name-Target)
- - $constraint-Node[CurrentStatus] = VALIDATED
- - $constraint-name-Node[CurrentStatus] = ABSENT
- - joinTargetNode($constraint, $constraint-Target, $constraint-Node)
- - joinTargetNode($constraint-name, $constraint-name-Target, $constraint-name-Node)
-- name: Constraint should be hidden before name
- from: constraint-Node
- kind: Precedence
- to: constraint-name-Node
- query:
- - $constraint[Type] IN ['*scpb.CheckConstraint', '*scpb.CheckConstraintUnvalidated', '*scpb.ColumnNotNull', '*scpb.ForeignKeyConstraint', '*scpb.ForeignKeyConstraintUnvalidated', '*scpb.UniqueWithoutIndexConstraint', '*scpb.UniqueWithoutIndexConstraintUnvalidated']
- - $constraint-name[Type] = '*scpb.ConstraintWithoutIndexName'
- - joinOnConstraintID($constraint, $constraint-name, $table-id, $constraint-id)
- - transient($constraint-Target, $constraint-name-Target)
- - $constraint-Node[CurrentStatus] = TRANSIENT_VALIDATED
- - $constraint-name-Node[CurrentStatus] = TRANSIENT_ABSENT
- - joinTargetNode($constraint, $constraint-Target, $constraint-Node)
- - joinTargetNode($constraint-name, $constraint-name-Target, $constraint-name-Node)
-- name: Constraint should be hidden before name
- from: constraint-Node
- kind: Precedence
- to: constraint-name-Node
- query:
- - $constraint[Type] IN ['*scpb.CheckConstraint', '*scpb.CheckConstraintUnvalidated', '*scpb.ColumnNotNull', '*scpb.ForeignKeyConstraint', '*scpb.ForeignKeyConstraintUnvalidated', '*scpb.UniqueWithoutIndexConstraint', '*scpb.UniqueWithoutIndexConstraintUnvalidated']
- - $constraint-name[Type] = '*scpb.ConstraintWithoutIndexName'
- - joinOnConstraintID($constraint, $constraint-name, $table-id, $constraint-id)
- - $constraint-Target[TargetStatus] = TRANSIENT_ABSENT
- - $constraint-Node[CurrentStatus] = TRANSIENT_VALIDATED
- - $constraint-name-Target[TargetStatus] = ABSENT
- - $constraint-name-Node[CurrentStatus] = ABSENT
- - joinTargetNode($constraint, $constraint-Target, $constraint-Node)
- - joinTargetNode($constraint-name, $constraint-name-Target, $constraint-name-Node)
-- name: Constraint should be hidden before name
- from: constraint-Node
- kind: Precedence
- to: constraint-name-Node
- query:
- - $constraint[Type] IN ['*scpb.CheckConstraint', '*scpb.CheckConstraintUnvalidated', '*scpb.ColumnNotNull', '*scpb.ForeignKeyConstraint', '*scpb.ForeignKeyConstraintUnvalidated', '*scpb.UniqueWithoutIndexConstraint', '*scpb.UniqueWithoutIndexConstraintUnvalidated']
- - $constraint-name[Type] = '*scpb.ConstraintWithoutIndexName'
- - joinOnConstraintID($constraint, $constraint-name, $table-id, $constraint-id)
- - $constraint-Target[TargetStatus] = ABSENT
- - $constraint-Node[CurrentStatus] = VALIDATED
- - $constraint-name-Target[TargetStatus] = TRANSIENT_ABSENT
- - $constraint-name-Node[CurrentStatus] = TRANSIENT_ABSENT
- - joinTargetNode($constraint, $constraint-Target, $constraint-Node)
- - joinTargetNode($constraint-name, $constraint-name-Target, $constraint-name-Node)
-- name: DEFAULT or ON UPDATE existence precedes writes to column
- from: expr-Node
- kind: Precedence
- to: column-Node
- query:
- - $expr[Type] IN ['*scpb.ColumnDefaultExpression', '*scpb.ColumnOnUpdateExpression']
- - $column[Type] = '*scpb.Column'
- - joinOnColumnID($expr, $column, $table-id, $col-id)
- - ToPublicOrTransient($expr-Target, $column-Target)
- - $expr-Node[CurrentStatus] = PUBLIC
- - $column-Node[CurrentStatus] = WRITE_ONLY
- - joinTargetNode($expr, $expr-Target, $expr-Node)
- - joinTargetNode($column, $column-Target, $column-Node)
-- name: 'ForeignKeyConstraint transitions to ABSENT uphold 2-version invariant: PUBLIC->VALIDATED'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.ForeignKeyConstraint'
- - $next[Type] = '*scpb.ForeignKeyConstraint'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = PUBLIC
- - $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'ForeignKeyConstraint transitions to ABSENT uphold 2-version invariant: VALIDATED->ABSENT'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.ForeignKeyConstraint'
- - $next[Type] = '*scpb.ForeignKeyConstraint'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = VALIDATED
- - $next-Node[CurrentStatus] = ABSENT
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - nodeNotExistsWithStatusIn_WRITE_ONLY($prev-Target)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'ForeignKeyConstraint transitions to ABSENT uphold 2-version invariant: WRITE_ONLY->VALIDATED'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.ForeignKeyConstraint'
- - $next[Type] = '*scpb.ForeignKeyConstraint'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = WRITE_ONLY
- - $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'ForeignKeyConstraint transitions to PUBLIC uphold 2-version invariant: ABSENT->WRITE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.ForeignKeyConstraint'
- - $next[Type] = '*scpb.ForeignKeyConstraint'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = PUBLIC
- - $prev-Node[CurrentStatus] = ABSENT
- - $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'ForeignKeyConstraint transitions to PUBLIC uphold 2-version invariant: VALIDATED->PUBLIC'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.ForeignKeyConstraint'
- - $next[Type] = '*scpb.ForeignKeyConstraint'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = PUBLIC
- - $prev-Node[CurrentStatus] = VALIDATED
- - $next-Node[CurrentStatus] = PUBLIC
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'ForeignKeyConstraint transitions to PUBLIC uphold 2-version invariant: WRITE_ONLY->VALIDATED'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.ForeignKeyConstraint'
- - $next[Type] = '*scpb.ForeignKeyConstraint'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = PUBLIC
- - $prev-Node[CurrentStatus] = WRITE_ONLY
- - $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: New primary index should go public only after columns being dropped move to WRITE_ONLY
- from: column-Node
- kind: Precedence
- to: new-primary-index-Node
- query:
- - $column[Type] = '*scpb.Column'
- - $new-primary-index[Type] = '*scpb.PrimaryIndex'
- - ColumnInSourcePrimaryIndex($index-column, $new-primary-index, $table-id, $column-id, $index-id)
- - joinOnColumnID($index-column, $column, $table-id, $column-id)
- - $column-Target[TargetStatus] = ABSENT
- - $column-Node[CurrentStatus] = WRITE_ONLY
- - $new-primary-index-Target[TargetStatus] = PUBLIC
- - $new-primary-index-Node[CurrentStatus] = PUBLIC
- - joinTargetNode($column, $column-Target, $column-Node)
- - joinTargetNode($new-primary-index, $new-primary-index-Target, $new-primary-index-Node)
-- name: 'PrimaryIndex transitions to ABSENT uphold 2-version invariant: BACKFILLED->DELETE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = BACKFILLED
- - $next-Node[CurrentStatus] = DELETE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to ABSENT uphold 2-version invariant: BACKFILL_ONLY->DELETE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = BACKFILL_ONLY
- - $next-Node[CurrentStatus] = DELETE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to ABSENT uphold 2-version invariant: DELETE_ONLY->ABSENT'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = DELETE_ONLY
- - $next-Node[CurrentStatus] = ABSENT
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - nodeNotExistsWithStatusIn_TRANSIENT_DELETE_ONLY_BACKFILLED_TRANSIENT_BACKFILLED_BACKFILL_ONLY_TRANSIENT_BACKFILL_ONLY($prev-Target)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to ABSENT uphold 2-version invariant: MERGED->WRITE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = MERGED
- - $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to ABSENT uphold 2-version invariant: MERGE_ONLY->WRITE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = MERGE_ONLY
- - $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to ABSENT uphold 2-version invariant: PUBLIC->VALIDATED'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = PUBLIC
- - $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to ABSENT uphold 2-version invariant: TRANSIENT_ABSENT->ABSENT'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = TRANSIENT_ABSENT
- - $next-Node[CurrentStatus] = ABSENT
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to ABSENT uphold 2-version invariant: TRANSIENT_BACKFILLED->DELETE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = TRANSIENT_BACKFILLED
- - $next-Node[CurrentStatus] = DELETE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to ABSENT uphold 2-version invariant: TRANSIENT_BACKFILL_ONLY->DELETE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = TRANSIENT_BACKFILL_ONLY
- - $next-Node[CurrentStatus] = DELETE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to ABSENT uphold 2-version invariant: TRANSIENT_DELETE_ONLY->DELETE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = TRANSIENT_DELETE_ONLY
- - $next-Node[CurrentStatus] = DELETE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to ABSENT uphold 2-version invariant: TRANSIENT_MERGED->WRITE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = TRANSIENT_MERGED
- - $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to ABSENT uphold 2-version invariant: TRANSIENT_MERGE_ONLY->WRITE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = TRANSIENT_MERGE_ONLY
- - $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to ABSENT uphold 2-version invariant: TRANSIENT_VALIDATED->VALIDATED'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = TRANSIENT_VALIDATED
- - $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to ABSENT uphold 2-version invariant: TRANSIENT_WRITE_ONLY->WRITE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = TRANSIENT_WRITE_ONLY
- - $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to ABSENT uphold 2-version invariant: VALIDATED->WRITE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = VALIDATED
- - $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - nodeNotExistsWithStatusIn_TRANSIENT_VALIDATED($prev-Target)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to ABSENT uphold 2-version invariant: WRITE_ONLY->DELETE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = WRITE_ONLY
- - $next-Node[CurrentStatus] = DELETE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - nodeNotExistsWithStatusIn_VALIDATED_TRANSIENT_WRITE_ONLY_MERGE_ONLY_TRANSIENT_MERGE_ONLY_MERGED_TRANSIENT_MERGED($prev-Target)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to PUBLIC uphold 2-version invariant: ABSENT->BACKFILL_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = PUBLIC
- - $prev-Node[CurrentStatus] = ABSENT
- - $next-Node[CurrentStatus] = BACKFILL_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to PUBLIC uphold 2-version invariant: BACKFILLED->DELETE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = PUBLIC
- - $prev-Node[CurrentStatus] = BACKFILLED
- - $next-Node[CurrentStatus] = DELETE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to PUBLIC uphold 2-version invariant: BACKFILL_ONLY->BACKFILLED'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = PUBLIC
- - $prev-Node[CurrentStatus] = BACKFILL_ONLY
- - $next-Node[CurrentStatus] = BACKFILLED
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to PUBLIC uphold 2-version invariant: DELETE_ONLY->MERGE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = PUBLIC
- - $prev-Node[CurrentStatus] = DELETE_ONLY
- - $next-Node[CurrentStatus] = MERGE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to PUBLIC uphold 2-version invariant: MERGED->WRITE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = PUBLIC
- - $prev-Node[CurrentStatus] = MERGED
- - $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to PUBLIC uphold 2-version invariant: MERGE_ONLY->MERGED'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = PUBLIC
- - $prev-Node[CurrentStatus] = MERGE_ONLY
- - $next-Node[CurrentStatus] = MERGED
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to PUBLIC uphold 2-version invariant: VALIDATED->PUBLIC'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = PUBLIC
- - $prev-Node[CurrentStatus] = VALIDATED
- - $next-Node[CurrentStatus] = PUBLIC
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to PUBLIC uphold 2-version invariant: WRITE_ONLY->VALIDATED'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = PUBLIC
- - $prev-Node[CurrentStatus] = WRITE_ONLY
- - $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to TRANSIENT_ABSENT uphold 2-version invariant: ABSENT->BACKFILL_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- - $prev-Node[CurrentStatus] = ABSENT
- - $next-Node[CurrentStatus] = BACKFILL_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to TRANSIENT_ABSENT uphold 2-version invariant: BACKFILLED->DELETE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- - $prev-Node[CurrentStatus] = BACKFILLED
- - $next-Node[CurrentStatus] = DELETE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to TRANSIENT_ABSENT uphold 2-version invariant: BACKFILL_ONLY->BACKFILLED'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- - $prev-Node[CurrentStatus] = BACKFILL_ONLY
- - $next-Node[CurrentStatus] = BACKFILLED
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to TRANSIENT_ABSENT uphold 2-version invariant: DELETE_ONLY->MERGE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- - $prev-Node[CurrentStatus] = DELETE_ONLY
- - $next-Node[CurrentStatus] = MERGE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to TRANSIENT_ABSENT uphold 2-version invariant: MERGED->WRITE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- - $prev-Node[CurrentStatus] = MERGED
- - $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to TRANSIENT_ABSENT uphold 2-version invariant: MERGE_ONLY->MERGED'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- - $prev-Node[CurrentStatus] = MERGE_ONLY
- - $next-Node[CurrentStatus] = MERGED
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to TRANSIENT_ABSENT uphold 2-version invariant: PUBLIC->TRANSIENT_VALIDATED'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- - $prev-Node[CurrentStatus] = PUBLIC
- - $next-Node[CurrentStatus] = TRANSIENT_VALIDATED
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to TRANSIENT_ABSENT uphold 2-version invariant: TRANSIENT_BACKFILLED->TRANSIENT_DELETE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- - $prev-Node[CurrentStatus] = TRANSIENT_BACKFILLED
- - $next-Node[CurrentStatus] = TRANSIENT_DELETE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to TRANSIENT_ABSENT uphold 2-version invariant: TRANSIENT_BACKFILL_ONLY->TRANSIENT_DELETE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- - $prev-Node[CurrentStatus] = TRANSIENT_BACKFILL_ONLY
- - $next-Node[CurrentStatus] = TRANSIENT_DELETE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to TRANSIENT_ABSENT uphold 2-version invariant: TRANSIENT_DELETE_ONLY->TRANSIENT_ABSENT'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- - $prev-Node[CurrentStatus] = TRANSIENT_DELETE_ONLY
- - $next-Node[CurrentStatus] = TRANSIENT_ABSENT
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - nodeNotExistsWithStatusIn_TRANSIENT_BACKFILLED_TRANSIENT_BACKFILL_ONLY($prev-Target)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to TRANSIENT_ABSENT uphold 2-version invariant: TRANSIENT_MERGED->TRANSIENT_WRITE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- - $prev-Node[CurrentStatus] = TRANSIENT_MERGED
- - $next-Node[CurrentStatus] = TRANSIENT_WRITE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to TRANSIENT_ABSENT uphold 2-version invariant: TRANSIENT_MERGE_ONLY->TRANSIENT_WRITE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- - $prev-Node[CurrentStatus] = TRANSIENT_MERGE_ONLY
- - $next-Node[CurrentStatus] = TRANSIENT_WRITE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to TRANSIENT_ABSENT uphold 2-version invariant: TRANSIENT_VALIDATED->TRANSIENT_WRITE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- - $prev-Node[CurrentStatus] = TRANSIENT_VALIDATED
- - $next-Node[CurrentStatus] = TRANSIENT_WRITE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to TRANSIENT_ABSENT uphold 2-version invariant: TRANSIENT_WRITE_ONLY->TRANSIENT_DELETE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- - $prev-Node[CurrentStatus] = TRANSIENT_WRITE_ONLY
- - $next-Node[CurrentStatus] = TRANSIENT_DELETE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - nodeNotExistsWithStatusIn_TRANSIENT_VALIDATED_TRANSIENT_MERGE_ONLY_TRANSIENT_MERGED($prev-Target)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to TRANSIENT_ABSENT uphold 2-version invariant: VALIDATED->PUBLIC'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- - $prev-Node[CurrentStatus] = VALIDATED
- - $next-Node[CurrentStatus] = PUBLIC
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to TRANSIENT_ABSENT uphold 2-version invariant: WRITE_ONLY->VALIDATED'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- - $prev-Node[CurrentStatus] = WRITE_ONLY
- - $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'SecondaryIndex transitions to ABSENT uphold 2-version invariant: BACKFILLED->DELETE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.SecondaryIndex'
- - $next[Type] = '*scpb.SecondaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = BACKFILLED
- - $next-Node[CurrentStatus] = DELETE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'SecondaryIndex transitions to ABSENT uphold 2-version invariant: BACKFILL_ONLY->DELETE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.SecondaryIndex'
- - $next[Type] = '*scpb.SecondaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = BACKFILL_ONLY
- - $next-Node[CurrentStatus] = DELETE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'SecondaryIndex transitions to ABSENT uphold 2-version invariant: DELETE_ONLY->ABSENT'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.SecondaryIndex'
- - $next[Type] = '*scpb.SecondaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = DELETE_ONLY
- - $next-Node[CurrentStatus] = ABSENT
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - nodeNotExistsWithStatusIn_BACKFILLED_BACKFILL_ONLY($prev-Target)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'SecondaryIndex transitions to ABSENT uphold 2-version invariant: MERGED->WRITE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.SecondaryIndex'
- - $next[Type] = '*scpb.SecondaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = MERGED
- - $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'SecondaryIndex transitions to ABSENT uphold 2-version invariant: MERGE_ONLY->WRITE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.SecondaryIndex'
- - $next[Type] = '*scpb.SecondaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = MERGE_ONLY
- - $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'SecondaryIndex transitions to ABSENT uphold 2-version invariant: PUBLIC->VALIDATED'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.SecondaryIndex'
- - $next[Type] = '*scpb.SecondaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = PUBLIC
- - $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'SecondaryIndex transitions to ABSENT uphold 2-version invariant: VALIDATED->WRITE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.SecondaryIndex'
- - $next[Type] = '*scpb.SecondaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = VALIDATED
- - $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'SecondaryIndex transitions to ABSENT uphold 2-version invariant: WRITE_ONLY->DELETE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.SecondaryIndex'
- - $next[Type] = '*scpb.SecondaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = WRITE_ONLY
- - $next-Node[CurrentStatus] = DELETE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - nodeNotExistsWithStatusIn_VALIDATED_MERGE_ONLY_MERGED($prev-Target)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'SecondaryIndex transitions to PUBLIC uphold 2-version invariant: ABSENT->BACKFILL_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.SecondaryIndex'
- - $next[Type] = '*scpb.SecondaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = PUBLIC
- - $prev-Node[CurrentStatus] = ABSENT
- - $next-Node[CurrentStatus] = BACKFILL_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'SecondaryIndex transitions to PUBLIC uphold 2-version invariant: BACKFILLED->DELETE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.SecondaryIndex'
- - $next[Type] = '*scpb.SecondaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = PUBLIC
- - $prev-Node[CurrentStatus] = BACKFILLED
- - $next-Node[CurrentStatus] = DELETE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'SecondaryIndex transitions to PUBLIC uphold 2-version invariant: BACKFILL_ONLY->BACKFILLED'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.SecondaryIndex'
- - $next[Type] = '*scpb.SecondaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = PUBLIC
- - $prev-Node[CurrentStatus] = BACKFILL_ONLY
- - $next-Node[CurrentStatus] = BACKFILLED
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'SecondaryIndex transitions to PUBLIC uphold 2-version invariant: DELETE_ONLY->MERGE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.SecondaryIndex'
- - $next[Type] = '*scpb.SecondaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = PUBLIC
- - $prev-Node[CurrentStatus] = DELETE_ONLY
- - $next-Node[CurrentStatus] = MERGE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'SecondaryIndex transitions to PUBLIC uphold 2-version invariant: MERGED->WRITE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.SecondaryIndex'
- - $next[Type] = '*scpb.SecondaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = PUBLIC
- - $prev-Node[CurrentStatus] = MERGED
- - $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'SecondaryIndex transitions to PUBLIC uphold 2-version invariant: MERGE_ONLY->MERGED'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.SecondaryIndex'
- - $next[Type] = '*scpb.SecondaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = PUBLIC
- - $prev-Node[CurrentStatus] = MERGE_ONLY
- - $next-Node[CurrentStatus] = MERGED
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'SecondaryIndex transitions to PUBLIC uphold 2-version invariant: VALIDATED->PUBLIC'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.SecondaryIndex'
- - $next[Type] = '*scpb.SecondaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = PUBLIC
- - $prev-Node[CurrentStatus] = VALIDATED
- - $next-Node[CurrentStatus] = PUBLIC
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'SecondaryIndex transitions to PUBLIC uphold 2-version invariant: WRITE_ONLY->VALIDATED'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.SecondaryIndex'
- - $next[Type] = '*scpb.SecondaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = PUBLIC
- - $prev-Node[CurrentStatus] = WRITE_ONLY
- - $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'TemporaryIndex transitions to ABSENT uphold 2-version invariant: DELETE_ONLY->ABSENT'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.TemporaryIndex'
- - $next[Type] = '*scpb.TemporaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = DELETE_ONLY
- - $next-Node[CurrentStatus] = ABSENT
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - nodeNotExistsWithStatusIn_TRANSIENT_DELETE_ONLY($prev-Target)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'TemporaryIndex transitions to ABSENT uphold 2-version invariant: TRANSIENT_ABSENT->ABSENT'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.TemporaryIndex'
- - $next[Type] = '*scpb.TemporaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = TRANSIENT_ABSENT
- - $next-Node[CurrentStatus] = ABSENT
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'TemporaryIndex transitions to ABSENT uphold 2-version invariant: TRANSIENT_DELETE_ONLY->DELETE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.TemporaryIndex'
- - $next[Type] = '*scpb.TemporaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = TRANSIENT_DELETE_ONLY
- - $next-Node[CurrentStatus] = DELETE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'TemporaryIndex transitions to ABSENT uphold 2-version invariant: WRITE_ONLY->DELETE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.TemporaryIndex'
- - $next[Type] = '*scpb.TemporaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = WRITE_ONLY
- - $next-Node[CurrentStatus] = DELETE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'TemporaryIndex transitions to TRANSIENT_ABSENT uphold 2-version invariant: ABSENT->DELETE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.TemporaryIndex'
- - $next[Type] = '*scpb.TemporaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- - $prev-Node[CurrentStatus] = ABSENT
- - $next-Node[CurrentStatus] = DELETE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'TemporaryIndex transitions to TRANSIENT_ABSENT uphold 2-version invariant: DELETE_ONLY->WRITE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.TemporaryIndex'
- - $next[Type] = '*scpb.TemporaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- - $prev-Node[CurrentStatus] = DELETE_ONLY
- - $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'TemporaryIndex transitions to TRANSIENT_ABSENT uphold 2-version invariant: TRANSIENT_DELETE_ONLY->TRANSIENT_ABSENT'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.TemporaryIndex'
- - $next[Type] = '*scpb.TemporaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- - $prev-Node[CurrentStatus] = TRANSIENT_DELETE_ONLY
- - $next-Node[CurrentStatus] = TRANSIENT_ABSENT
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'TemporaryIndex transitions to TRANSIENT_ABSENT uphold 2-version invariant: WRITE_ONLY->TRANSIENT_DELETE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.TemporaryIndex'
- - $next[Type] = '*scpb.TemporaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- - $prev-Node[CurrentStatus] = WRITE_ONLY
- - $next-Node[CurrentStatus] = TRANSIENT_DELETE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'UniqueWithoutIndexConstraint transitions to ABSENT uphold 2-version invariant: PUBLIC->VALIDATED'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.UniqueWithoutIndexConstraint'
- - $next[Type] = '*scpb.UniqueWithoutIndexConstraint'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = PUBLIC
- - $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'UniqueWithoutIndexConstraint transitions to ABSENT uphold 2-version invariant: VALIDATED->ABSENT'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.UniqueWithoutIndexConstraint'
- - $next[Type] = '*scpb.UniqueWithoutIndexConstraint'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = VALIDATED
- - $next-Node[CurrentStatus] = ABSENT
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - nodeNotExistsWithStatusIn_WRITE_ONLY($prev-Target)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'UniqueWithoutIndexConstraint transitions to ABSENT uphold 2-version invariant: WRITE_ONLY->VALIDATED'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.UniqueWithoutIndexConstraint'
- - $next[Type] = '*scpb.UniqueWithoutIndexConstraint'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = WRITE_ONLY
- - $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'UniqueWithoutIndexConstraint transitions to PUBLIC uphold 2-version invariant: ABSENT->WRITE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.UniqueWithoutIndexConstraint'
- - $next[Type] = '*scpb.UniqueWithoutIndexConstraint'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = PUBLIC
- - $prev-Node[CurrentStatus] = ABSENT
- - $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'UniqueWithoutIndexConstraint transitions to PUBLIC uphold 2-version invariant: VALIDATED->PUBLIC'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.UniqueWithoutIndexConstraint'
- - $next[Type] = '*scpb.UniqueWithoutIndexConstraint'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = PUBLIC
- - $prev-Node[CurrentStatus] = VALIDATED
- - $next-Node[CurrentStatus] = PUBLIC
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'UniqueWithoutIndexConstraint transitions to PUBLIC uphold 2-version invariant: WRITE_ONLY->VALIDATED'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.UniqueWithoutIndexConstraint'
- - $next[Type] = '*scpb.UniqueWithoutIndexConstraint'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = PUBLIC
- - $prev-Node[CurrentStatus] = WRITE_ONLY
- - $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: all adding indexes reached BACKFILL_ONLY before any of their columns disappear
- from: index-Node
- kind: Precedence
- to: column-Node
- query:
- - $index[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex']
- - $column[Type] = '*scpb.Column'
- - ColumnInIndex($index-column, $index, $table-id, $column-id, $index-id)
- - joinOnColumnID($index-column, $column, $table-id, $column-id)
- - $index-Target[TargetStatus] IN [PUBLIC, TRANSIENT_ABSENT]
- - $index-Node[CurrentStatus] = BACKFILL_ONLY
- - $column-Target[TargetStatus] = ABSENT
- - $column-Node[CurrentStatus] = WRITE_ONLY
- - joinTargetNode($index, $index-Target, $index-Node)
- - joinTargetNode($column, $column-Target, $column-Node)
-- name: back-reference in parent descriptor is removed before parent descriptor is dropped
- from: back-reference-in-parent-descriptor-Node
- kind: Precedence
- to: parent-descriptor-Node
- query:
- - $back-reference-in-parent-descriptor[Type] IN ['*scpb.SchemaChild', '*scpb.SchemaParent']
- - $parent-descriptor[Type] IN ['*scpb.AliasType', '*scpb.CompositeType', '*scpb.Database', '*scpb.EnumType', '*scpb.Function', '*scpb.Schema', '*scpb.Sequence', '*scpb.Table', '*scpb.View']
- - joinReferencedDescID($back-reference-in-parent-descriptor, $parent-descriptor, $desc-id)
- - toAbsent($back-reference-in-parent-descriptor-Target, $parent-descriptor-Target)
- - $back-reference-in-parent-descriptor-Node[CurrentStatus] = ABSENT
- - $parent-descriptor-Node[CurrentStatus] = DROPPED
- - joinTargetNode($back-reference-in-parent-descriptor, $back-reference-in-parent-descriptor-Target, $back-reference-in-parent-descriptor-Node)
- - joinTargetNode($parent-descriptor, $parent-descriptor-Target, $parent-descriptor-Node)
-- name: column constraint removed right before column reaches write only
- from: column-constraint-Node
- kind: Precedence
- to: column-Node
- query:
- - $column-constraint[Type] IN ['*scpb.CheckConstraint', '*scpb.ColumnNotNull', '*scpb.ForeignKeyConstraint', '*scpb.UniqueWithoutIndexConstraint']
- - $column[Type] = '*scpb.Column'
- - joinOnColumnID($column-constraint, $column, $table-id, $col-id)
- - toAbsent($column-constraint-Target, $column-Target)
- - $column-constraint-Node[CurrentStatus] = ABSENT
- - $column-Node[CurrentStatus] = WRITE_ONLY
- - joinTargetNode($column-constraint, $column-constraint-Target, $column-constraint-Node)
- - joinTargetNode($column, $column-Target, $column-Node)
-- name: column constraint removed right before column reaches write only
- from: column-constraint-Node
- kind: Precedence
- to: column-Node
- query:
- - $column-constraint[Type] IN ['*scpb.CheckConstraint', '*scpb.ColumnNotNull', '*scpb.ForeignKeyConstraint', '*scpb.UniqueWithoutIndexConstraint']
- - $column[Type] = '*scpb.Column'
- - joinOnColumnID($column-constraint, $column, $table-id, $col-id)
- - transient($column-constraint-Target, $column-Target)
- - $column-constraint-Node[CurrentStatus] = TRANSIENT_ABSENT
- - $column-Node[CurrentStatus] = TRANSIENT_WRITE_ONLY
- - joinTargetNode($column-constraint, $column-constraint-Target, $column-constraint-Node)
- - joinTargetNode($column, $column-Target, $column-Node)
-- name: column constraint removed right before column reaches write only
- from: column-constraint-Node
- kind: Precedence
- to: column-Node
- query:
- - $column-constraint[Type] IN ['*scpb.CheckConstraint', '*scpb.ColumnNotNull', '*scpb.ForeignKeyConstraint', '*scpb.UniqueWithoutIndexConstraint']
- - $column[Type] = '*scpb.Column'
- - joinOnColumnID($column-constraint, $column, $table-id, $col-id)
- - $column-constraint-Target[TargetStatus] = TRANSIENT_ABSENT
- - $column-constraint-Node[CurrentStatus] = TRANSIENT_ABSENT
- - $column-Target[TargetStatus] = ABSENT
- - $column-Node[CurrentStatus] = WRITE_ONLY
- - joinTargetNode($column-constraint, $column-constraint-Target, $column-constraint-Node)
- - joinTargetNode($column, $column-Target, $column-Node)
-- name: column constraint removed right before column reaches write only
- from: column-constraint-Node
- kind: Precedence
- to: column-Node
- query:
- - $column-constraint[Type] IN ['*scpb.CheckConstraint', '*scpb.ColumnNotNull', '*scpb.ForeignKeyConstraint', '*scpb.UniqueWithoutIndexConstraint']
- - $column[Type] = '*scpb.Column'
- - joinOnColumnID($column-constraint, $column, $table-id, $col-id)
- - $column-constraint-Target[TargetStatus] = ABSENT
- - $column-constraint-Node[CurrentStatus] = ABSENT
- - $column-Target[TargetStatus] = TRANSIENT_ABSENT
- - $column-Node[CurrentStatus] = TRANSIENT_WRITE_ONLY
- - joinTargetNode($column-constraint, $column-constraint-Target, $column-constraint-Node)
- - joinTargetNode($column, $column-Target, $column-Node)
-- name: column dependents exist before column becomes public
- from: dependent-Node
- kind: Precedence
- to: column-Node
- query:
- - $dependent[Type] IN ['*scpb.ColumnComment', '*scpb.ColumnComputeExpression', '*scpb.ColumnDefaultExpression', '*scpb.ColumnName', '*scpb.ColumnNotNull', '*scpb.ColumnOnUpdateExpression', '*scpb.ColumnType', '*scpb.IndexColumn', '*scpb.SequenceOwner']
- - $column[Type] = '*scpb.Column'
- - joinOnColumnID($dependent, $column, $table-id, $col-id)
- - ToPublicOrTransient($dependent-Target, $column-Target)
- - $dependent-Node[CurrentStatus] = PUBLIC
- - $column-Node[CurrentStatus] = PUBLIC
- - joinTargetNode($dependent, $dependent-Target, $dependent-Node)
- - joinTargetNode($column, $column-Target, $column-Node)
-- name: column existence precedes column dependents
- from: column-Node
- kind: Precedence
- to: dependent-Node
- query:
- - $column[Type] = '*scpb.Column'
- - $dependent[Type] IN ['*scpb.ColumnComment', '*scpb.ColumnComputeExpression', '*scpb.ColumnDefaultExpression', '*scpb.ColumnName', '*scpb.ColumnNotNull', '*scpb.ColumnOnUpdateExpression', '*scpb.ColumnType', '*scpb.IndexColumn', '*scpb.SequenceOwner']
- - joinOnColumnID($column, $dependent, $table-id, $col-id)
- - ToPublicOrTransient($column-Target, $dependent-Target)
- - $column-Node[CurrentStatus] = DELETE_ONLY
- - $dependent-Node[CurrentStatus] = PUBLIC
- - joinTargetNode($column, $column-Target, $column-Node)
- - joinTargetNode($dependent, $dependent-Target, $dependent-Node)
-- name: column existence precedes index existence
- from: column-Node
- kind: Precedence
- to: index-Node
- query:
- - $column[Type] = '*scpb.Column'
- - $index[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex']
- - joinOnColumnID($column, $index-column, $table-id, $column-id)
- - ColumnInIndex($index-column, $index, $table-id, $column-id, $index-id)
- - ToPublicOrTransient($column-Target, $index-Target)
- - $column-Node[CurrentStatus] = DELETE_ONLY
- - $index-Node[CurrentStatus] = BACKFILL_ONLY
- - joinTargetNode($column, $column-Target, $column-Node)
- - joinTargetNode($index, $index-Target, $index-Node)
-- name: column existence precedes temp index existence
- from: column-Node
- kind: Precedence
- to: index-Node
- query:
- - $column[Type] = '*scpb.Column'
- - $index[Type] = '*scpb.TemporaryIndex'
- - joinOnColumnID($index-column, $column, $table-id, $column-id)
- - ColumnInIndex($index-column, $index, $table-id, $column-id, $index-id)
- - ToPublicOrTransient($column-Target, $index-Target)
- - $column-Node[CurrentStatus] = DELETE_ONLY
- - $index-Node[CurrentStatus] = DELETE_ONLY
- - joinTargetNode($column, $column-Target, $column-Node)
- - joinTargetNode($index, $index-Target, $index-Node)
-- name: column is WRITE_ONLY before temporary index is WRITE_ONLY
- from: column-Node
- kind: Precedence
- to: index-Node
- query:
- - $column[Type] = '*scpb.Column'
- - $index[Type] = '*scpb.TemporaryIndex'
- - joinOnColumnID($index-column, $column, $table-id, $column-id)
- - ColumnInIndex($index-column, $index, $table-id, $column-id, $index-id)
- - ToPublicOrTransient($column-Target, $index-Target)
- - $column-Node[CurrentStatus] = WRITE_ONLY
- - $index-Node[CurrentStatus] = WRITE_ONLY
- - joinTargetNode($column, $column-Target, $column-Node)
- - joinTargetNode($index, $index-Target, $index-Node)
-- name: column name and type set right after column existence
- from: column-Node
- kind: SameStagePrecedence
- to: column-name-or-type-Node
- query:
- - $column[Type] = '*scpb.Column'
- - $column-name-or-type[Type] IN ['*scpb.ColumnName', '*scpb.ColumnType']
- - ToPublicOrTransient($column-Target, $column-name-or-type-Target)
- - $column-Node[CurrentStatus] = DELETE_ONLY
- - $column-name-or-type-Node[CurrentStatus] = PUBLIC
- - joinOnColumnID($column, $column-name-or-type, $table-id, $col-id)
- - joinTargetNode($column, $column-Target, $column-Node)
- - joinTargetNode($column-name-or-type, $column-name-or-type-Target, $column-name-or-type-Node)
-- name: column no longer public before dependents
- from: column-Node
- kind: Precedence
- to: dependent-Node
- query:
- - $column[Type] = '*scpb.Column'
- - $dependent[Type] IN ['*scpb.ColumnComment', '*scpb.ColumnComputeExpression', '*scpb.ColumnDefaultExpression', '*scpb.ColumnName', '*scpb.ColumnOnUpdateExpression', '*scpb.ColumnType', '*scpb.IndexColumn', '*scpb.SequenceOwner']
- - joinOnColumnID($column, $dependent, $table-id, $col-id)
- - toAbsent($column-Target, $dependent-Target)
- - $column-Node[CurrentStatus] = WRITE_ONLY
- - $dependent-Node[CurrentStatus] = ABSENT
- - joinTargetNode($column, $column-Target, $column-Node)
- - joinTargetNode($dependent, $dependent-Target, $dependent-Node)
-- name: column no longer public before dependents
- from: column-Node
- kind: Precedence
- to: dependent-Node
- query:
- - $column[Type] = '*scpb.Column'
- - $dependent[Type] IN ['*scpb.ColumnComment', '*scpb.ColumnComputeExpression', '*scpb.ColumnDefaultExpression', '*scpb.ColumnName', '*scpb.ColumnOnUpdateExpression', '*scpb.ColumnType', '*scpb.IndexColumn', '*scpb.SequenceOwner']
- - joinOnColumnID($column, $dependent, $table-id, $col-id)
- - transient($column-Target, $dependent-Target)
- - $column-Node[CurrentStatus] = TRANSIENT_WRITE_ONLY
- - $dependent-Node[CurrentStatus] = TRANSIENT_ABSENT
- - joinTargetNode($column, $column-Target, $column-Node)
- - joinTargetNode($dependent, $dependent-Target, $dependent-Node)
-- name: column no longer public before dependents
- from: column-Node
- kind: Precedence
- to: dependent-Node
- query:
- - $column[Type] = '*scpb.Column'
- - $dependent[Type] IN ['*scpb.ColumnComment', '*scpb.ColumnComputeExpression', '*scpb.ColumnDefaultExpression', '*scpb.ColumnName', '*scpb.ColumnOnUpdateExpression', '*scpb.ColumnType', '*scpb.IndexColumn', '*scpb.SequenceOwner']
- - joinOnColumnID($column, $dependent, $table-id, $col-id)
- - $column-Target[TargetStatus] = TRANSIENT_ABSENT
- - $column-Node[CurrentStatus] = TRANSIENT_WRITE_ONLY
- - $dependent-Target[TargetStatus] = ABSENT
- - $dependent-Node[CurrentStatus] = ABSENT
- - joinTargetNode($column, $column-Target, $column-Node)
- - joinTargetNode($dependent, $dependent-Target, $dependent-Node)
-- name: column no longer public before dependents
- from: column-Node
- kind: Precedence
- to: dependent-Node
- query:
- - $column[Type] = '*scpb.Column'
- - $dependent[Type] IN ['*scpb.ColumnComment', '*scpb.ColumnComputeExpression', '*scpb.ColumnDefaultExpression', '*scpb.ColumnName', '*scpb.ColumnOnUpdateExpression', '*scpb.ColumnType', '*scpb.IndexColumn', '*scpb.SequenceOwner']
- - joinOnColumnID($column, $dependent, $table-id, $col-id)
- - $column-Target[TargetStatus] = ABSENT
- - $column-Node[CurrentStatus] = WRITE_ONLY
- - $dependent-Target[TargetStatus] = TRANSIENT_ABSENT
- - $dependent-Node[CurrentStatus] = TRANSIENT_ABSENT
- - joinTargetNode($column, $column-Target, $column-Node)
- - joinTargetNode($dependent, $dependent-Target, $dependent-Node)
-- name: column public before non-index-backed constraint (including hash-sharded) is created
- from: column-Node
- kind: Precedence
- to: constraint-Node
- query:
- - $column[Type] = '*scpb.Column'
- - $constraint[Type] IN ['*scpb.CheckConstraint', '*scpb.CheckConstraintUnvalidated', '*scpb.ColumnNotNull', '*scpb.ForeignKeyConstraint', '*scpb.ForeignKeyConstraintUnvalidated', '*scpb.UniqueWithoutIndexConstraint', '*scpb.UniqueWithoutIndexConstraintUnvalidated']
- - $column[ColumnID] = $columnID
- - $constraint[ReferencedColumnIDs] CONTAINS $columnID
- - joinOnDescID($column, $constraint, $table-id)
- - ToPublicOrTransient($column-Target, $constraint-Target)
- - $column-Node[CurrentStatus] = PUBLIC
- - $constraint-Node[CurrentStatus] = WRITE_ONLY
- - joinTargetNode($column, $column-Target, $column-Node)
- - joinTargetNode($constraint, $constraint-Target, $constraint-Node)
-- name: column type dependents removed right before column type
- from: dependent-Node
- kind: SameStagePrecedence
- to: column-type-Node
- query:
- - $dependent[Type] IN ['*scpb.ColumnComputeExpression', '*scpb.ColumnDefaultExpression', '*scpb.ColumnOnUpdateExpression', '*scpb.SequenceOwner']
- - $column-type[Type] = '*scpb.ColumnType'
- - joinOnColumnID($dependent, $column-type, $table-id, $col-id)
- - toAbsent($dependent-Target, $column-type-Target)
- - $dependent-Node[CurrentStatus] = ABSENT
- - $column-type-Node[CurrentStatus] = ABSENT
- - joinTargetNode($dependent, $dependent-Target, $dependent-Node)
- - joinTargetNode($column-type, $column-type-Target, $column-type-Node)
-- name: column type is changed to public after doing validation of a transient check constraint
- from: transient-check-constraint-Node
- kind: SameStagePrecedence
- to: column-type-Node
- query:
- - $transient-check-constraint[Type] = '*scpb.CheckConstraint'
- - $column-type[Type] = '*scpb.ColumnType'
- - joinOnDescID($transient-check-constraint, $column-type, $table-id)
- - $column-type[ColumnID] = $columnID
- - $transient-check-constraint[ReferencedColumnIDs] CONTAINS $columnID
- - $transient-check-constraint-Target[TargetStatus] = TRANSIENT_ABSENT
- - $column-type-Target[TargetStatus] = PUBLIC
- - $transient-check-constraint-Node[CurrentStatus] = TRANSIENT_VALIDATED
- - $column-type-Node[CurrentStatus] = PUBLIC
- - joinTargetNode($transient-check-constraint, $transient-check-constraint-Target, $transient-check-constraint-Node)
- - joinTargetNode($column-type, $column-type-Target, $column-type-Node)
-- name: column type removed before column family
- from: column-type-Node
- kind: Precedence
- to: column-family-Node
- query:
- - $column-type[Type] = '*scpb.ColumnType'
- - $column-family[Type] = '*scpb.ColumnFamily'
- - joinOnColumnFamilyID($column-type, $column-family, $table-id, $family-id)
- - toAbsent($column-type-Target, $column-family-Target)
- - $column-type-Node[CurrentStatus] = ABSENT
- - $column-family-Node[CurrentStatus] = ABSENT
- - joinTargetNode($column-type, $column-type-Target, $column-type-Node)
- - joinTargetNode($column-family, $column-family-Target, $column-family-Node)
-- name: column type removed right before column when not dropping relation
- from: column-type-Node
- kind: SameStagePrecedence
- to: column-Node
- query:
- - $column-type[Type] = '*scpb.ColumnType'
- - descriptorIsNotBeingDropped-24.3($column-type)
- - $column[Type] = '*scpb.Column'
- - joinOnColumnID($column-type, $column, $table-id, $col-id)
- - toAbsent($column-type-Target, $column-Target)
- - $column-type-Node[CurrentStatus] = ABSENT
- - $column-Node[CurrentStatus] = ABSENT
- - joinTargetNode($column-type, $column-type-Target, $column-type-Node)
- - joinTargetNode($column, $column-Target, $column-Node)
-- name: column type update is decomposed as a drop then add
- from: old-column-type-Node
- kind: Precedence
- to: new-column-type-Node
- query:
- - $old-column-type[Type] = '*scpb.ColumnType'
- - $new-column-type[Type] = '*scpb.ColumnType'
- - joinOnColumnID($old-column-type, $new-column-type, $table-id, $col-id)
- - $old-column-type-Target[TargetStatus] = ABSENT
- - $old-column-type-Node[CurrentStatus] = PUBLIC
- - $new-column-type-Target[TargetStatus] = PUBLIC
- - $new-column-type-Node[CurrentStatus] = ABSENT
- - joinTargetNode($old-column-type, $old-column-type-Target, $old-column-type-Node)
- - joinTargetNode($new-column-type, $new-column-type-Target, $new-column-type-Node)
-- name: column writable right before column constraint is enforced.
- from: column-Node
- kind: SameStagePrecedence
- to: column-constraint-Node
- query:
- - $column[Type] = '*scpb.Column'
- - $column-constraint[Type] = '*scpb.ColumnNotNull'
- - joinOnColumnID($column, $column-constraint, $table-id, $col-id)
- - ToPublicOrTransient($column-Target, $column-constraint-Target)
- - $column-Node[CurrentStatus] = WRITE_ONLY
- - $column-constraint-Node[CurrentStatus] = WRITE_ONLY
- - joinTargetNode($column, $column-Target, $column-Node)
- - joinTargetNode($column-constraint, $column-constraint-Target, $column-constraint-Node)
-- name: constraint dependent public right before complex constraint
- from: dependent-Node
- kind: SameStagePrecedence
- to: complex-constraint-Node
- query:
- - $dependent[Type] = '*scpb.ConstraintComment'
- - $complex-constraint[Type] IN ['*scpb.CheckConstraint', '*scpb.ColumnNotNull', '*scpb.ForeignKeyConstraint', '*scpb.UniqueWithoutIndexConstraint']
- - joinOnConstraintID($dependent, $complex-constraint, $table-id, $constraint-id)
- - ToPublicOrTransient($dependent-Target, $complex-constraint-Target)
- - $dependent-Node[CurrentStatus] = PUBLIC
- - $complex-constraint-Node[CurrentStatus] = PUBLIC
- - joinTargetNode($dependent, $dependent-Target, $dependent-Node)
- - joinTargetNode($complex-constraint, $complex-constraint-Target, $complex-constraint-Node)
-- name: constraint no longer public before dependents
- from: constraint-Node
- kind: Precedence
- to: dependent-Node
- query:
- - $constraint[Type] IN ['*scpb.CheckConstraint', '*scpb.ColumnNotNull', '*scpb.ForeignKeyConstraint', '*scpb.UniqueWithoutIndexConstraint']
- - $dependent[Type] = '*scpb.ConstraintComment'
- - joinOnConstraintID($constraint, $dependent, $table-id, $constraint-id)
- - toAbsent($constraint-Target, $dependent-Target)
- - $constraint-Node[CurrentStatus] = VALIDATED
- - $dependent-Node[CurrentStatus] = ABSENT
- - joinTargetNode($constraint, $constraint-Target, $constraint-Node)
- - joinTargetNode($dependent, $dependent-Target, $dependent-Node)
-- name: constraint no longer public before dependents
- from: constraint-Node
- kind: Precedence
- to: dependent-Node
- query:
- - $constraint[Type] IN ['*scpb.CheckConstraint', '*scpb.ColumnNotNull', '*scpb.ForeignKeyConstraint', '*scpb.UniqueWithoutIndexConstraint']
- - $dependent[Type] = '*scpb.ConstraintComment'
- - joinOnConstraintID($constraint, $dependent, $table-id, $constraint-id)
- - transient($constraint-Target, $dependent-Target)
- - $constraint-Node[CurrentStatus] = TRANSIENT_VALIDATED
- - $dependent-Node[CurrentStatus] = TRANSIENT_ABSENT
- - joinTargetNode($constraint, $constraint-Target, $constraint-Node)
- - joinTargetNode($dependent, $dependent-Target, $dependent-Node)
-- name: constraint no longer public before dependents
- from: constraint-Node
- kind: Precedence
- to: dependent-Node
- query:
- - $constraint[Type] IN ['*scpb.CheckConstraint', '*scpb.ColumnNotNull', '*scpb.ForeignKeyConstraint', '*scpb.UniqueWithoutIndexConstraint']
- - $dependent[Type] = '*scpb.ConstraintComment'
- - joinOnConstraintID($constraint, $dependent, $table-id, $constraint-id)
- - $constraint-Target[TargetStatus] = TRANSIENT_ABSENT
- - $constraint-Node[CurrentStatus] = TRANSIENT_VALIDATED
- - $dependent-Target[TargetStatus] = ABSENT
- - $dependent-Node[CurrentStatus] = ABSENT
- - joinTargetNode($constraint, $constraint-Target, $constraint-Node)
- - joinTargetNode($dependent, $dependent-Target, $dependent-Node)
-- name: constraint no longer public before dependents
- from: constraint-Node
- kind: Precedence
- to: dependent-Node
- query:
- - $constraint[Type] IN ['*scpb.CheckConstraint', '*scpb.ColumnNotNull', '*scpb.ForeignKeyConstraint', '*scpb.UniqueWithoutIndexConstraint']
- - $dependent[Type] = '*scpb.ConstraintComment'
- - joinOnConstraintID($constraint, $dependent, $table-id, $constraint-id)
- - $constraint-Target[TargetStatus] = ABSENT
- - $constraint-Node[CurrentStatus] = VALIDATED
- - $dependent-Target[TargetStatus] = TRANSIENT_ABSENT
- - $dependent-Node[CurrentStatus] = TRANSIENT_ABSENT
- - joinTargetNode($constraint, $constraint-Target, $constraint-Node)
- - joinTargetNode($dependent, $dependent-Target, $dependent-Node)
-- name: cross-descriptor constraint is absent before referenced descriptor is dropped
- from: cross-desc-constraint-Node
- kind: Precedence
- to: referenced-descriptor-Node
- query:
- - $cross-desc-constraint[Type] IN ['*scpb.CheckConstraint', '*scpb.ForeignKeyConstraint', '*scpb.UniqueWithoutIndexConstraint']
- - $referenced-descriptor[Type] IN ['*scpb.AliasType', '*scpb.CompositeType', '*scpb.Database', '*scpb.EnumType', '*scpb.Function', '*scpb.Schema', '*scpb.Sequence', '*scpb.Table', '*scpb.View']
- - joinReferencedDescID($cross-desc-constraint, $referenced-descriptor, $desc-id)
- - toAbsent($cross-desc-constraint-Target, $referenced-descriptor-Target)
- - $cross-desc-constraint-Node[CurrentStatus] = ABSENT
- - $referenced-descriptor-Node[CurrentStatus] = DROPPED
- - joinTargetNode($cross-desc-constraint, $cross-desc-constraint-Target, $cross-desc-constraint-Node)
- - joinTargetNode($referenced-descriptor, $referenced-descriptor-Target, $referenced-descriptor-Node)
-- name: cross-descriptor constraint is absent before referencing descriptor is dropped
- from: cross-desc-constraint-Node
- kind: Precedence
- to: referencing-descriptor-Node
- query:
- - $cross-desc-constraint[Type] IN ['*scpb.CheckConstraint', '*scpb.ForeignKeyConstraint', '*scpb.UniqueWithoutIndexConstraint']
- - $referencing-descriptor[Type] IN ['*scpb.AliasType', '*scpb.CompositeType', '*scpb.Database', '*scpb.EnumType', '*scpb.Function', '*scpb.Schema', '*scpb.Sequence', '*scpb.Table', '*scpb.View']
- - joinOnDescID($cross-desc-constraint, $referencing-descriptor, $desc-id)
- - toAbsent($cross-desc-constraint-Target, $referencing-descriptor-Target)
- - $cross-desc-constraint-Node[CurrentStatus] = ABSENT
- - $referencing-descriptor-Node[CurrentStatus] = DROPPED
- - joinTargetNode($cross-desc-constraint, $cross-desc-constraint-Target, $cross-desc-constraint-Node)
- - joinTargetNode($referencing-descriptor, $referencing-descriptor-Target, $referencing-descriptor-Node)
-- name: dependent view absent before secondary index
- from: view-Node
- kind: Precedence
- to: index-Node
- query:
- - $view[Type] = '*scpb.View'
- - $index[Type] = '*scpb.SecondaryIndex'
- - viewReferencesIndex(*scpb.View, *scpb.SecondaryIndex)($view, $index)
- - toAbsent($view-Target, $index-Target)
- - $view-Node[CurrentStatus] = ABSENT
- - $index-Node[CurrentStatus] = ABSENT
- - joinTargetNode($view, $view-Target, $view-Node)
- - joinTargetNode($index, $index-Target, $index-Node)
-- name: dependent view absent before secondary index
- from: view-Node
- kind: Precedence
- to: index-Node
- query:
- - $view[Type] = '*scpb.View'
- - $index[Type] = '*scpb.SecondaryIndex'
- - viewReferencesIndex(*scpb.View, *scpb.SecondaryIndex)($view, $index)
- - transient($view-Target, $index-Target)
- - $view-Node[CurrentStatus] = TRANSIENT_ABSENT
- - $index-Node[CurrentStatus] = TRANSIENT_ABSENT
- - joinTargetNode($view, $view-Target, $view-Node)
- - joinTargetNode($index, $index-Target, $index-Node)
-- name: dependent view absent before secondary index
- from: view-Node
- kind: Precedence
- to: index-Node
- query:
- - $view[Type] = '*scpb.View'
- - $index[Type] = '*scpb.SecondaryIndex'
- - viewReferencesIndex(*scpb.View, *scpb.SecondaryIndex)($view, $index)
- - $view-Target[TargetStatus] = TRANSIENT_ABSENT
- - $view-Node[CurrentStatus] = TRANSIENT_ABSENT
- - $index-Target[TargetStatus] = ABSENT
- - $index-Node[CurrentStatus] = ABSENT
- - joinTargetNode($view, $view-Target, $view-Node)
- - joinTargetNode($index, $index-Target, $index-Node)
-- name: dependent view absent before secondary index
- from: view-Node
- kind: Precedence
- to: index-Node
- query:
- - $view[Type] = '*scpb.View'
- - $index[Type] = '*scpb.SecondaryIndex'
- - viewReferencesIndex(*scpb.View, *scpb.SecondaryIndex)($view, $index)
- - $view-Target[TargetStatus] = ABSENT
- - $view-Node[CurrentStatus] = ABSENT
- - $index-Target[TargetStatus] = TRANSIENT_ABSENT
- - $index-Node[CurrentStatus] = TRANSIENT_ABSENT
- - joinTargetNode($view, $view-Target, $view-Node)
- - joinTargetNode($index, $index-Target, $index-Node)
-- name: dependent view no longer public before secondary index
- from: view-Node
- kind: Precedence
- to: index-Node
- query:
- - $view[Type] = '*scpb.View'
- - $index[Type] = '*scpb.SecondaryIndex'
- - viewReferencesIndex(*scpb.View, *scpb.SecondaryIndex)($view, $index)
- - toAbsent($view-Target, $index-Target)
- - $view-Node[CurrentStatus] = DROPPED
- - $index-Node[CurrentStatus] = VALIDATED
- - joinTargetNode($view, $view-Target, $view-Node)
- - joinTargetNode($index, $index-Target, $index-Node)
-- name: dependent view no longer public before secondary index
- from: view-Node
- kind: Precedence
- to: index-Node
- query:
- - $view[Type] = '*scpb.View'
- - $index[Type] = '*scpb.SecondaryIndex'
- - viewReferencesIndex(*scpb.View, *scpb.SecondaryIndex)($view, $index)
- - transient($view-Target, $index-Target)
- - $view-Node[CurrentStatus] = TRANSIENT_DROPPED
- - $index-Node[CurrentStatus] = TRANSIENT_VALIDATED
- - joinTargetNode($view, $view-Target, $view-Node)
- - joinTargetNode($index, $index-Target, $index-Node)
-- name: dependent view no longer public before secondary index
- from: view-Node
- kind: Precedence
- to: index-Node
- query:
- - $view[Type] = '*scpb.View'
- - $index[Type] = '*scpb.SecondaryIndex'
- - viewReferencesIndex(*scpb.View, *scpb.SecondaryIndex)($view, $index)
- - $view-Target[TargetStatus] = TRANSIENT_ABSENT
- - $view-Node[CurrentStatus] = TRANSIENT_DROPPED
- - $index-Target[TargetStatus] = ABSENT
- - $index-Node[CurrentStatus] = VALIDATED
- - joinTargetNode($view, $view-Target, $view-Node)
- - joinTargetNode($index, $index-Target, $index-Node)
-- name: dependent view no longer public before secondary index
- from: view-Node
- kind: Precedence
- to: index-Node
- query:
- - $view[Type] = '*scpb.View'
- - $index[Type] = '*scpb.SecondaryIndex'
- - viewReferencesIndex(*scpb.View, *scpb.SecondaryIndex)($view, $index)
- - $view-Target[TargetStatus] = ABSENT
- - $view-Node[CurrentStatus] = DROPPED
- - $index-Target[TargetStatus] = TRANSIENT_ABSENT
- - $index-Node[CurrentStatus] = TRANSIENT_VALIDATED
- - joinTargetNode($view, $view-Target, $view-Node)
- - joinTargetNode($index, $index-Target, $index-Node)
-- name: dependents exist before descriptor becomes public
- from: dependent-Node
- kind: Precedence
- to: relation-Node
- query:
- - $dependent[Type] IN ['*scpb.CheckConstraint', '*scpb.CheckConstraintUnvalidated', '*scpb.Column', '*scpb.ColumnComment', '*scpb.ColumnComputeExpression', '*scpb.ColumnDefaultExpression', '*scpb.ColumnFamily', '*scpb.ColumnName', '*scpb.ColumnNotNull', '*scpb.ColumnOnUpdateExpression', '*scpb.ColumnType', '*scpb.CompositeTypeAttrName', '*scpb.CompositeTypeAttrType', '*scpb.ConstraintComment', '*scpb.ConstraintWithoutIndexName', '*scpb.DatabaseComment', '*scpb.DatabaseRegionConfig', '*scpb.DatabaseRoleSetting', '*scpb.DatabaseZoneConfig', '*scpb.EnumTypeValue', '*scpb.ForeignKeyConstraint', '*scpb.ForeignKeyConstraintUnvalidated', '*scpb.FunctionBody', '*scpb.FunctionLeakProof', '*scpb.FunctionName', '*scpb.FunctionNullInputBehavior', '*scpb.FunctionSecurity', '*scpb.FunctionVolatility', '*scpb.IndexColumn', '*scpb.IndexComment', '*scpb.IndexName', '*scpb.IndexPartitioning', '*scpb.IndexZoneConfig', '*scpb.LDRJobIDs', '*scpb.Namespace', '*scpb.Owner', '*scpb.PartitionZoneConfig', '*scpb.PrimaryIndex', '*scpb.RowLevelTTL', '*scpb.SchemaChild', '*scpb.SchemaComment', '*scpb.SchemaParent', '*scpb.SecondaryIndex', '*scpb.SecondaryIndexPartial', '*scpb.SequenceOption', '*scpb.SequenceOwner', '*scpb.TableComment', '*scpb.TableLocalityGlobal', '*scpb.TableLocalityPrimaryRegion', '*scpb.TableLocalityRegionalByRow', '*scpb.TableLocalitySecondaryRegion', '*scpb.TablePartitioning', '*scpb.TableSchemaLocked', '*scpb.TableZoneConfig', '*scpb.TemporaryIndex', '*scpb.Trigger', '*scpb.TriggerDeps', '*scpb.TriggerEnabled', '*scpb.TriggerEvents', '*scpb.TriggerFunctionCall', '*scpb.TriggerName', '*scpb.TriggerTiming', '*scpb.TriggerTransition', '*scpb.TriggerWhen', '*scpb.TypeComment', '*scpb.UniqueWithoutIndexConstraint', '*scpb.UniqueWithoutIndexConstraintUnvalidated', '*scpb.UserPrivileges']
- - $relation[Type] IN ['*scpb.AliasType', '*scpb.CompositeType', '*scpb.Database', '*scpb.EnumType', '*scpb.Function', '*scpb.Schema', '*scpb.Sequence', '*scpb.Table', '*scpb.View']
- - joinOnDescID($dependent, $relation, $relation-id)
- - ToPublicOrTransient($dependent-Target, $relation-Target)
- - $dependent-Node[CurrentStatus] = PUBLIC
- - $relation-Node[CurrentStatus] = PUBLIC
- - joinTargetNode($dependent, $dependent-Target, $dependent-Node)
- - joinTargetNode($relation, $relation-Target, $relation-Node)
-- name: dependents removed before column
- from: dependent-Node
- kind: Precedence
- to: column-Node
- query:
- - $dependent[Type] IN ['*scpb.ColumnComment', '*scpb.ColumnComputeExpression', '*scpb.ColumnDefaultExpression', '*scpb.ColumnName', '*scpb.ColumnNotNull', '*scpb.ColumnOnUpdateExpression', '*scpb.ColumnType', '*scpb.IndexColumn', '*scpb.SequenceOwner']
- - $column[Type] = '*scpb.Column'
- - joinOnColumnID($dependent, $column, $table-id, $col-id)
- - toAbsent($dependent-Target, $column-Target)
- - $dependent-Node[CurrentStatus] = ABSENT
- - $column-Node[CurrentStatus] = ABSENT
- - joinTargetNode($dependent, $dependent-Target, $dependent-Node)
- - joinTargetNode($column, $column-Target, $column-Node)
-- name: dependents removed before column
- from: dependent-Node
- kind: Precedence
- to: column-Node
- query:
- - $dependent[Type] IN ['*scpb.ColumnComment', '*scpb.ColumnComputeExpression', '*scpb.ColumnDefaultExpression', '*scpb.ColumnName', '*scpb.ColumnNotNull', '*scpb.ColumnOnUpdateExpression', '*scpb.ColumnType', '*scpb.IndexColumn', '*scpb.SequenceOwner']
- - $column[Type] = '*scpb.Column'
- - joinOnColumnID($dependent, $column, $table-id, $col-id)
- - transient($dependent-Target, $column-Target)
- - $dependent-Node[CurrentStatus] = TRANSIENT_ABSENT
- - $column-Node[CurrentStatus] = TRANSIENT_ABSENT
- - joinTargetNode($dependent, $dependent-Target, $dependent-Node)
- - joinTargetNode($column, $column-Target, $column-Node)
-- name: dependents removed before column
- from: dependent-Node
- kind: Precedence
- to: column-Node
- query:
- - $dependent[Type] IN ['*scpb.ColumnComment', '*scpb.ColumnComputeExpression', '*scpb.ColumnDefaultExpression', '*scpb.ColumnName', '*scpb.ColumnNotNull', '*scpb.ColumnOnUpdateExpression', '*scpb.ColumnType', '*scpb.IndexColumn', '*scpb.SequenceOwner']
- - $column[Type] = '*scpb.Column'
- - joinOnColumnID($dependent, $column, $table-id, $col-id)
- - $dependent-Target[TargetStatus] = TRANSIENT_ABSENT
- - $dependent-Node[CurrentStatus] = TRANSIENT_ABSENT
- - $column-Target[TargetStatus] = ABSENT
- - $column-Node[CurrentStatus] = ABSENT
- - joinTargetNode($dependent, $dependent-Target, $dependent-Node)
- - joinTargetNode($column, $column-Target, $column-Node)
-- name: dependents removed before column
- from: dependent-Node
- kind: Precedence
- to: column-Node
- query:
- - $dependent[Type] IN ['*scpb.ColumnComment', '*scpb.ColumnComputeExpression', '*scpb.ColumnDefaultExpression', '*scpb.ColumnName', '*scpb.ColumnNotNull', '*scpb.ColumnOnUpdateExpression', '*scpb.ColumnType', '*scpb.IndexColumn', '*scpb.SequenceOwner']
- - $column[Type] = '*scpb.Column'
- - joinOnColumnID($dependent, $column, $table-id, $col-id)
- - $dependent-Target[TargetStatus] = ABSENT
- - $dependent-Node[CurrentStatus] = ABSENT
- - $column-Target[TargetStatus] = TRANSIENT_ABSENT
- - $column-Node[CurrentStatus] = TRANSIENT_ABSENT
- - joinTargetNode($dependent, $dependent-Target, $dependent-Node)
- - joinTargetNode($column, $column-Target, $column-Node)
-- name: dependents removed before constraint
- from: dependents-Node
- kind: Precedence
- to: constraint-Node
- query:
- - $dependents[Type] = '*scpb.ConstraintComment'
- - $constraint[Type] IN ['*scpb.CheckConstraint', '*scpb.ColumnNotNull', '*scpb.ForeignKeyConstraint', '*scpb.UniqueWithoutIndexConstraint']
- - joinOnConstraintID($dependents, $constraint, $table-id, $constraint-id)
- - toAbsent($dependents-Target, $constraint-Target)
- - $dependents-Node[CurrentStatus] = ABSENT
- - $constraint-Node[CurrentStatus] = ABSENT
- - joinTargetNode($dependents, $dependents-Target, $dependents-Node)
- - joinTargetNode($constraint, $constraint-Target, $constraint-Node)
-- name: dependents removed before constraint
- from: dependents-Node
- kind: Precedence
- to: constraint-Node
- query:
- - $dependents[Type] = '*scpb.ConstraintComment'
- - $constraint[Type] IN ['*scpb.CheckConstraint', '*scpb.ColumnNotNull', '*scpb.ForeignKeyConstraint', '*scpb.UniqueWithoutIndexConstraint']
- - joinOnConstraintID($dependents, $constraint, $table-id, $constraint-id)
- - transient($dependents-Target, $constraint-Target)
- - $dependents-Node[CurrentStatus] = TRANSIENT_ABSENT
- - $constraint-Node[CurrentStatus] = TRANSIENT_ABSENT
- - joinTargetNode($dependents, $dependents-Target, $dependents-Node)
- - joinTargetNode($constraint, $constraint-Target, $constraint-Node)
-- name: dependents removed before constraint
- from: dependents-Node
- kind: Precedence
- to: constraint-Node
- query:
- - $dependents[Type] = '*scpb.ConstraintComment'
- - $constraint[Type] IN ['*scpb.CheckConstraint', '*scpb.ColumnNotNull', '*scpb.ForeignKeyConstraint', '*scpb.UniqueWithoutIndexConstraint']
- - joinOnConstraintID($dependents, $constraint, $table-id, $constraint-id)
- - $dependents-Target[TargetStatus] = TRANSIENT_ABSENT
- - $dependents-Node[CurrentStatus] = TRANSIENT_ABSENT
- - $constraint-Target[TargetStatus] = ABSENT
- - $constraint-Node[CurrentStatus] = ABSENT
- - joinTargetNode($dependents, $dependents-Target, $dependents-Node)
- - joinTargetNode($constraint, $constraint-Target, $constraint-Node)
-- name: dependents removed before constraint
- from: dependents-Node
- kind: Precedence
- to: constraint-Node
- query:
- - $dependents[Type] = '*scpb.ConstraintComment'
- - $constraint[Type] IN ['*scpb.CheckConstraint', '*scpb.ColumnNotNull', '*scpb.ForeignKeyConstraint', '*scpb.UniqueWithoutIndexConstraint']
- - joinOnConstraintID($dependents, $constraint, $table-id, $constraint-id)
- - $dependents-Target[TargetStatus] = ABSENT
- - $dependents-Node[CurrentStatus] = ABSENT
- - $constraint-Target[TargetStatus] = TRANSIENT_ABSENT
- - $constraint-Node[CurrentStatus] = TRANSIENT_ABSENT
- - joinTargetNode($dependents, $dependents-Target, $dependents-Node)
- - joinTargetNode($constraint, $constraint-Target, $constraint-Node)
-- name: dependents removed before index
- from: dependent-Node
- kind: Precedence
- to: index-Node
- query:
- - $dependent[Type] IN ['*scpb.IndexColumn', '*scpb.IndexComment', '*scpb.IndexName', '*scpb.IndexPartitioning', '*scpb.IndexZoneConfig', '*scpb.PartitionZoneConfig', '*scpb.SecondaryIndexPartial']
- - $index[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex', '*scpb.TemporaryIndex']
- - joinOnIndexID($dependent, $index, $table-id, $index-id)
- - toAbsent($dependent-Target, $index-Target)
- - $dependent-Node[CurrentStatus] = ABSENT
- - $index-Node[CurrentStatus] = ABSENT
- - joinTargetNode($dependent, $dependent-Target, $dependent-Node)
- - joinTargetNode($index, $index-Target, $index-Node)
-- name: dependents removed before index
- from: dependent-Node
- kind: Precedence
- to: index-Node
- query:
- - $dependent[Type] IN ['*scpb.IndexColumn', '*scpb.IndexComment', '*scpb.IndexName', '*scpb.IndexPartitioning', '*scpb.IndexZoneConfig', '*scpb.PartitionZoneConfig', '*scpb.SecondaryIndexPartial']
- - $index[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex', '*scpb.TemporaryIndex']
- - joinOnIndexID($dependent, $index, $table-id, $index-id)
- - transient($dependent-Target, $index-Target)
- - $dependent-Node[CurrentStatus] = TRANSIENT_ABSENT
- - $index-Node[CurrentStatus] = TRANSIENT_ABSENT
- - joinTargetNode($dependent, $dependent-Target, $dependent-Node)
- - joinTargetNode($index, $index-Target, $index-Node)
-- name: dependents removed before index
- from: dependent-Node
- kind: Precedence
- to: index-Node
- query:
- - $dependent[Type] IN ['*scpb.IndexColumn', '*scpb.IndexComment', '*scpb.IndexName', '*scpb.IndexPartitioning', '*scpb.IndexZoneConfig', '*scpb.PartitionZoneConfig', '*scpb.SecondaryIndexPartial']
- - $index[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex', '*scpb.TemporaryIndex']
- - joinOnIndexID($dependent, $index, $table-id, $index-id)
- - $dependent-Target[TargetStatus] = TRANSIENT_ABSENT
- - $dependent-Node[CurrentStatus] = TRANSIENT_ABSENT
- - $index-Target[TargetStatus] = ABSENT
- - $index-Node[CurrentStatus] = ABSENT
- - joinTargetNode($dependent, $dependent-Target, $dependent-Node)
- - joinTargetNode($index, $index-Target, $index-Node)
-- name: dependents removed before index
- from: dependent-Node
- kind: Precedence
- to: index-Node
- query:
- - $dependent[Type] IN ['*scpb.IndexColumn', '*scpb.IndexComment', '*scpb.IndexName', '*scpb.IndexPartitioning', '*scpb.IndexZoneConfig', '*scpb.PartitionZoneConfig', '*scpb.SecondaryIndexPartial']
- - $index[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex', '*scpb.TemporaryIndex']
- - joinOnIndexID($dependent, $index, $table-id, $index-id)
- - $dependent-Target[TargetStatus] = ABSENT
- - $dependent-Node[CurrentStatus] = ABSENT
- - $index-Target[TargetStatus] = TRANSIENT_ABSENT
- - $index-Node[CurrentStatus] = TRANSIENT_ABSENT
- - joinTargetNode($dependent, $dependent-Target, $dependent-Node)
- - joinTargetNode($index, $index-Target, $index-Node)
-- name: dependents removed right before simple constraint
- from: dependents-Node
- kind: SameStagePrecedence
- to: constraint-Node
- query:
- - $dependents[Type] = '*scpb.ConstraintComment'
- - $constraint[Type] IN ['*scpb.CheckConstraintUnvalidated', '*scpb.ForeignKeyConstraintUnvalidated', '*scpb.UniqueWithoutIndexConstraintUnvalidated']
- - joinOnConstraintID($dependents, $constraint, $table-id, $constraint-id)
- - toAbsent($dependents-Target, $constraint-Target)
- - $dependents-Node[CurrentStatus] = ABSENT
- - $constraint-Node[CurrentStatus] = ABSENT
- - joinTargetNode($dependents, $dependents-Target, $dependents-Node)
- - joinTargetNode($constraint, $constraint-Target, $constraint-Node)
-- name: dependents removed right before simple constraint
- from: dependents-Node
- kind: SameStagePrecedence
- to: constraint-Node
- query:
- - $dependents[Type] = '*scpb.ConstraintComment'
- - $constraint[Type] IN ['*scpb.CheckConstraintUnvalidated', '*scpb.ForeignKeyConstraintUnvalidated', '*scpb.UniqueWithoutIndexConstraintUnvalidated']
- - joinOnConstraintID($dependents, $constraint, $table-id, $constraint-id)
- - transient($dependents-Target, $constraint-Target)
- - $dependents-Node[CurrentStatus] = TRANSIENT_ABSENT
- - $constraint-Node[CurrentStatus] = TRANSIENT_ABSENT
- - joinTargetNode($dependents, $dependents-Target, $dependents-Node)
- - joinTargetNode($constraint, $constraint-Target, $constraint-Node)
-- name: dependents removed right before simple constraint
- from: dependents-Node
- kind: SameStagePrecedence
- to: constraint-Node
- query:
- - $dependents[Type] = '*scpb.ConstraintComment'
- - $constraint[Type] IN ['*scpb.CheckConstraintUnvalidated', '*scpb.ForeignKeyConstraintUnvalidated', '*scpb.UniqueWithoutIndexConstraintUnvalidated']
- - joinOnConstraintID($dependents, $constraint, $table-id, $constraint-id)
- - $dependents-Target[TargetStatus] = TRANSIENT_ABSENT
- - $dependents-Node[CurrentStatus] = TRANSIENT_ABSENT
- - $constraint-Target[TargetStatus] = ABSENT
- - $constraint-Node[CurrentStatus] = ABSENT
- - joinTargetNode($dependents, $dependents-Target, $dependents-Node)
- - joinTargetNode($constraint, $constraint-Target, $constraint-Node)
-- name: dependents removed right before simple constraint
- from: dependents-Node
- kind: SameStagePrecedence
- to: constraint-Node
- query:
- - $dependents[Type] = '*scpb.ConstraintComment'
- - $constraint[Type] IN ['*scpb.CheckConstraintUnvalidated', '*scpb.ForeignKeyConstraintUnvalidated', '*scpb.UniqueWithoutIndexConstraintUnvalidated']
- - joinOnConstraintID($dependents, $constraint, $table-id, $constraint-id)
- - $dependents-Target[TargetStatus] = ABSENT
- - $dependents-Node[CurrentStatus] = ABSENT
- - $constraint-Target[TargetStatus] = TRANSIENT_ABSENT
- - $constraint-Node[CurrentStatus] = TRANSIENT_ABSENT
- - joinTargetNode($dependents, $dependents-Target, $dependents-Node)
- - joinTargetNode($constraint, $constraint-Target, $constraint-Node)
-- name: descriptor drop right before removing dependent between types
- from: referenced-descriptor-Node
- kind: SameStagePrecedence
- to: referencing-via-type-Node
- query:
- - $referenced-descriptor[Type] IN ['*scpb.AliasType', '*scpb.CompositeType', '*scpb.EnumType']
- - $referenced-descriptor[DescID] = $fromDescID
- - $referencing-via-type[ReferencedTypeIDs] CONTAINS $fromDescID
- - $referencing-via-type[Type] = '*scpb.ColumnType'
- - toAbsent($referenced-descriptor-Target, $referencing-via-type-Target)
- - $referenced-descriptor-Node[CurrentStatus] = DROPPED
- - $referencing-via-type-Node[CurrentStatus] = ABSENT
- - joinTargetNode($referenced-descriptor, $referenced-descriptor-Target, $referenced-descriptor-Node)
- - joinTargetNode($referencing-via-type, $referencing-via-type-Target, $referencing-via-type-Node)
-- name: descriptor drop right before removing dependent with attr ref
- from: referenced-descriptor-Node
- kind: SameStagePrecedence
- to: referencing-via-attr-Node
- query:
- - $referenced-descriptor[Type] IN ['*scpb.AliasType', '*scpb.CompositeType', '*scpb.Database', '*scpb.EnumType', '*scpb.Function', '*scpb.Schema', '*scpb.Sequence', '*scpb.Table', '*scpb.View']
- - $referencing-via-attr[Type] IN ['*scpb.CheckConstraintUnvalidated', '*scpb.ColumnComment', '*scpb.ColumnComputeExpression', '*scpb.ColumnDefaultExpression', '*scpb.ColumnFamily', '*scpb.ColumnName', '*scpb.ColumnOnUpdateExpression', '*scpb.ColumnType', '*scpb.CompositeTypeAttrName', '*scpb.CompositeTypeAttrType', '*scpb.ConstraintComment', '*scpb.ConstraintWithoutIndexName', '*scpb.DatabaseComment', '*scpb.DatabaseRegionConfig', '*scpb.DatabaseRoleSetting', '*scpb.DatabaseZoneConfig', '*scpb.EnumTypeValue', '*scpb.ForeignKeyConstraintUnvalidated', '*scpb.FunctionBody', '*scpb.FunctionLeakProof', '*scpb.FunctionName', '*scpb.FunctionNullInputBehavior', '*scpb.FunctionSecurity', '*scpb.FunctionVolatility', '*scpb.IndexColumn', '*scpb.IndexComment', '*scpb.IndexName', '*scpb.IndexPartitioning', '*scpb.IndexZoneConfig', '*scpb.LDRJobIDs', '*scpb.Namespace', '*scpb.Owner', '*scpb.PartitionZoneConfig', '*scpb.RowLevelTTL', '*scpb.SchemaComment', '*scpb.SecondaryIndexPartial', '*scpb.SequenceOption', '*scpb.SequenceOwner', '*scpb.TableComment', '*scpb.TableLocalityGlobal', '*scpb.TableLocalityPrimaryRegion', '*scpb.TableLocalityRegionalByRow', '*scpb.TableLocalitySecondaryRegion', '*scpb.TablePartitioning', '*scpb.TableSchemaLocked', '*scpb.TableZoneConfig', '*scpb.Trigger', '*scpb.TriggerDeps', '*scpb.TriggerEnabled', '*scpb.TriggerEvents', '*scpb.TriggerFunctionCall', '*scpb.TriggerName', '*scpb.TriggerTiming', '*scpb.TriggerTransition', '*scpb.TriggerWhen', '*scpb.TypeComment', '*scpb.UniqueWithoutIndexConstraintUnvalidated', '*scpb.UserPrivileges']
- - joinReferencedDescID($referencing-via-attr, $referenced-descriptor, $desc-id)
- - toAbsent($referenced-descriptor-Target, $referencing-via-attr-Target)
- - $referenced-descriptor-Node[CurrentStatus] = DROPPED
- - $referencing-via-attr-Node[CurrentStatus] = ABSENT
- - joinTargetNode($referenced-descriptor, $referenced-descriptor-Target, $referenced-descriptor-Node)
- - joinTargetNode($referencing-via-attr, $referencing-via-attr-Target, $referencing-via-attr-Node)
-- name: descriptor drop right before removing dependent with expr ref to sequence
- from: referenced-descriptor-Node
- kind: SameStagePrecedence
- to: referencing-via-expr-Node
- query:
- - $referenced-descriptor[Type] = '*scpb.Sequence'
- - $referenced-descriptor[DescID] = $seqID
- - $referencing-via-expr[ReferencedSequenceIDs] CONTAINS $seqID
- - $referencing-via-expr[Type] IN ['*scpb.CheckConstraintUnvalidated', '*scpb.ColumnComputeExpression', '*scpb.ColumnDefaultExpression', '*scpb.ColumnOnUpdateExpression', '*scpb.ColumnType', '*scpb.SecondaryIndexPartial']
- - toAbsent($referenced-descriptor-Target, $referencing-via-expr-Target)
- - $referenced-descriptor-Node[CurrentStatus] = DROPPED
- - $referencing-via-expr-Node[CurrentStatus] = ABSENT
- - joinTargetNode($referenced-descriptor, $referenced-descriptor-Target, $referenced-descriptor-Node)
- - joinTargetNode($referencing-via-expr, $referencing-via-expr-Target, $referencing-via-expr-Node)
-- name: descriptor drop right before removing dependent with function refs in columns
- from: referenced-descriptor-Node
- kind: SameStagePrecedence
- to: referencing-via-function-Node
- query:
- - $referenced-descriptor[Type] = '*scpb.Function'
- - $referenced-descriptor[DescID] = $fromDescID
- - $referencing-via-function[ReferencedFunctionIDs] CONTAINS $fromDescID
- - $referencing-via-function[Type] IN ['*scpb.CheckConstraintUnvalidated', '*scpb.ColumnComputeExpression', '*scpb.ColumnDefaultExpression', '*scpb.ColumnOnUpdateExpression', '*scpb.ColumnType', '*scpb.SecondaryIndexPartial']
- - toAbsent($referenced-descriptor-Target, $referencing-via-function-Target)
- - $referenced-descriptor-Node[CurrentStatus] = DROPPED
- - $referencing-via-function-Node[CurrentStatus] = ABSENT
- - joinTargetNode($referenced-descriptor, $referenced-descriptor-Target, $referenced-descriptor-Node)
- - joinTargetNode($referencing-via-function, $referencing-via-function-Target, $referencing-via-function-Node)
-- name: descriptor drop right before removing dependent with type refs in expressions
- from: referenced-descriptor-Node
- kind: SameStagePrecedence
- to: referencing-via-type-Node
- query:
- - $referenced-descriptor[Type] IN ['*scpb.AliasType', '*scpb.CompositeType', '*scpb.EnumType']
- - $referenced-descriptor[DescID] = $fromDescID
- - $referencing-via-type[ReferencedTypeIDs] CONTAINS $fromDescID
- - descriptorIsNotBeingDropped-24.3($referencing-via-type)
- - $referencing-via-type[Type] IN ['*scpb.CheckConstraintUnvalidated', '*scpb.ColumnComputeExpression', '*scpb.ColumnDefaultExpression', '*scpb.ColumnOnUpdateExpression', '*scpb.ColumnType', '*scpb.SecondaryIndexPartial']
- - toAbsent($referenced-descriptor-Target, $referencing-via-type-Target)
- - $referenced-descriptor-Node[CurrentStatus] = DROPPED
- - $referencing-via-type-Node[CurrentStatus] = ABSENT
- - joinTargetNode($referenced-descriptor, $referenced-descriptor-Target, $referenced-descriptor-Node)
- - joinTargetNode($referencing-via-type, $referencing-via-type-Target, $referencing-via-type-Node)
-- name: descriptor dropped before dependent element removal
- from: descriptor-Node
- kind: Precedence
- to: dependent-Node
- query:
- - $descriptor[Type] IN ['*scpb.AliasType', '*scpb.CompositeType', '*scpb.Database', '*scpb.EnumType', '*scpb.Function', '*scpb.Schema', '*scpb.Sequence', '*scpb.Table', '*scpb.View']
- - $dependent[Type] IN ['*scpb.CheckConstraintUnvalidated', '*scpb.ColumnComment', '*scpb.ColumnComputeExpression', '*scpb.ColumnDefaultExpression', '*scpb.ColumnFamily', '*scpb.ColumnName', '*scpb.ColumnOnUpdateExpression', '*scpb.ColumnType', '*scpb.CompositeTypeAttrName', '*scpb.CompositeTypeAttrType', '*scpb.DatabaseComment', '*scpb.DatabaseRegionConfig', '*scpb.DatabaseRoleSetting', '*scpb.DatabaseZoneConfig', '*scpb.EnumTypeValue', '*scpb.ForeignKeyConstraintUnvalidated', '*scpb.FunctionBody', '*scpb.FunctionLeakProof', '*scpb.FunctionName', '*scpb.FunctionNullInputBehavior', '*scpb.FunctionSecurity', '*scpb.FunctionVolatility', '*scpb.IndexColumn', '*scpb.IndexComment', '*scpb.IndexName', '*scpb.IndexPartitioning', '*scpb.IndexZoneConfig', '*scpb.LDRJobIDs', '*scpb.Namespace', '*scpb.Owner', '*scpb.PartitionZoneConfig', '*scpb.RowLevelTTL', '*scpb.SchemaChild', '*scpb.SchemaComment', '*scpb.SchemaParent', '*scpb.SecondaryIndexPartial', '*scpb.SequenceOption', '*scpb.SequenceOwner', '*scpb.TableComment', '*scpb.TableLocalityGlobal', '*scpb.TableLocalityPrimaryRegion', '*scpb.TableLocalityRegionalByRow', '*scpb.TableLocalitySecondaryRegion', '*scpb.TablePartitioning', '*scpb.TableSchemaLocked', '*scpb.TableZoneConfig', '*scpb.Trigger', '*scpb.TriggerDeps', '*scpb.TriggerEnabled', '*scpb.TriggerEvents', '*scpb.TriggerFunctionCall', '*scpb.TriggerName', '*scpb.TriggerTiming', '*scpb.TriggerTransition', '*scpb.TriggerWhen', '*scpb.TypeComment', '*scpb.UniqueWithoutIndexConstraintUnvalidated', '*scpb.UserPrivileges']
- - joinOnDescID($descriptor, $dependent, $desc-id)
- - toAbsent($descriptor-Target, $dependent-Target)
- - $descriptor-Node[CurrentStatus] = DROPPED
- - $dependent-Node[CurrentStatus] = ABSENT
- - joinTargetNode($descriptor, $descriptor-Target, $descriptor-Node)
- - joinTargetNode($dependent, $dependent-Target, $dependent-Node)
-- name: descriptor dropped in transaction before removal
- from: dropped-Node
- kind: PreviousTransactionPrecedence
- to: absent-Node
- query:
- - $dropped[Type] IN ['*scpb.AliasType', '*scpb.CompositeType', '*scpb.Database', '*scpb.EnumType', '*scpb.Function', '*scpb.Schema', '*scpb.Sequence', '*scpb.Table', '*scpb.View']
- - $dropped[DescID] = $_
- - $dropped[Self] = $absent
- - toAbsent($dropped-Target, $absent-Target)
- - $dropped-Node[CurrentStatus] = DROPPED
- - $absent-Node[CurrentStatus] = ABSENT
- - joinTargetNode($dropped, $dropped-Target, $dropped-Node)
- - joinTargetNode($absent, $absent-Target, $absent-Node)
-- name: descriptor dropped right before removing back-reference in its parent descriptor
- from: descriptor-Node
- kind: SameStagePrecedence
- to: back-reference-in-parent-descriptor-Node
- query:
- - $descriptor[Type] IN ['*scpb.AliasType', '*scpb.CompositeType', '*scpb.Database', '*scpb.EnumType', '*scpb.Function', '*scpb.Schema', '*scpb.Sequence', '*scpb.Table', '*scpb.View']
- - $back-reference-in-parent-descriptor[Type] IN ['*scpb.SchemaChild', '*scpb.SchemaParent']
- - joinOnDescID($descriptor, $back-reference-in-parent-descriptor, $desc-id)
- - toAbsent($descriptor-Target, $back-reference-in-parent-descriptor-Target)
- - $descriptor-Node[CurrentStatus] = DROPPED
- - $back-reference-in-parent-descriptor-Node[CurrentStatus] = ABSENT
- - joinTargetNode($descriptor, $descriptor-Target, $descriptor-Node)
- - joinTargetNode($back-reference-in-parent-descriptor, $back-reference-in-parent-descriptor-Target, $back-reference-in-parent-descriptor-Node)
-- name: descriptor existence precedes dependents
- from: relation-Node
- kind: Precedence
- to: dependent-Node
- query:
- - $relation[Type] IN ['*scpb.AliasType', '*scpb.CompositeType', '*scpb.Database', '*scpb.EnumType', '*scpb.Function', '*scpb.Schema', '*scpb.Sequence', '*scpb.Table', '*scpb.View']
- - $dependent[Type] IN ['*scpb.CheckConstraint', '*scpb.CheckConstraintUnvalidated', '*scpb.Column', '*scpb.ColumnComment', '*scpb.ColumnComputeExpression', '*scpb.ColumnDefaultExpression', '*scpb.ColumnFamily', '*scpb.ColumnName', '*scpb.ColumnNotNull', '*scpb.ColumnOnUpdateExpression', '*scpb.ColumnType', '*scpb.CompositeTypeAttrName', '*scpb.CompositeTypeAttrType', '*scpb.ConstraintComment', '*scpb.ConstraintWithoutIndexName', '*scpb.DatabaseComment', '*scpb.DatabaseData', '*scpb.DatabaseRegionConfig', '*scpb.DatabaseRoleSetting', '*scpb.DatabaseZoneConfig', '*scpb.EnumTypeValue', '*scpb.ForeignKeyConstraint', '*scpb.ForeignKeyConstraintUnvalidated', '*scpb.FunctionBody', '*scpb.FunctionLeakProof', '*scpb.FunctionName', '*scpb.FunctionNullInputBehavior', '*scpb.FunctionSecurity', '*scpb.FunctionVolatility', '*scpb.IndexColumn', '*scpb.IndexComment', '*scpb.IndexData', '*scpb.IndexName', '*scpb.IndexPartitioning', '*scpb.IndexZoneConfig', '*scpb.LDRJobIDs', '*scpb.Namespace', '*scpb.Owner', '*scpb.PartitionZoneConfig', '*scpb.PrimaryIndex', '*scpb.RowLevelTTL', '*scpb.SchemaChild', '*scpb.SchemaComment', '*scpb.SchemaParent', '*scpb.SecondaryIndex', '*scpb.SecondaryIndexPartial', '*scpb.SequenceOption', '*scpb.SequenceOwner', '*scpb.TableComment', '*scpb.TableData', '*scpb.TableLocalityGlobal', '*scpb.TableLocalityPrimaryRegion', '*scpb.TableLocalityRegionalByRow', '*scpb.TableLocalitySecondaryRegion', '*scpb.TablePartitioning', '*scpb.TableSchemaLocked', '*scpb.TableZoneConfig', '*scpb.TemporaryIndex', '*scpb.Trigger', '*scpb.TriggerDeps', '*scpb.TriggerEnabled', '*scpb.TriggerEvents', '*scpb.TriggerFunctionCall', '*scpb.TriggerName', '*scpb.TriggerTiming', '*scpb.TriggerTransition', '*scpb.TriggerWhen', '*scpb.TypeComment', '*scpb.UniqueWithoutIndexConstraint', '*scpb.UniqueWithoutIndexConstraintUnvalidated', '*scpb.UserPrivileges']
- - joinOnDescID($relation, $dependent, $relation-id)
- - ToPublicOrTransient($relation-Target, $dependent-Target)
- - $relation-Node[CurrentStatus] = DESCRIPTOR_ADDED
- - $dependent-Node[CurrentStatus] = PUBLIC
- - joinTargetNode($relation, $relation-Target, $relation-Node)
- - joinTargetNode($dependent, $dependent-Target, $dependent-Node)
-- name: descriptor removed right before garbage collection
- from: database-Node
- kind: SameStagePrecedence
- to: data-Node
- query:
- - $database[Type] IN ['*scpb.AliasType', '*scpb.CompositeType', '*scpb.Database', '*scpb.EnumType', '*scpb.Function', '*scpb.Schema', '*scpb.Sequence', '*scpb.Table', '*scpb.View']
- - $data[Type] = '*scpb.DatabaseData'
- - joinOnDescID($database, $data, $db-id)
- - toAbsent($database-Target, $data-Target)
- - $database-Node[CurrentStatus] = ABSENT
- - $data-Node[CurrentStatus] = DROPPED
- - joinTargetNode($database, $database-Target, $database-Node)
- - joinTargetNode($data, $data-Target, $data-Node)
-- name: ensure columns are in increasing order
- from: later-column-Node
- kind: Precedence
- to: earlier-column-Node
- query:
- - $later-column[Type] = '*scpb.Column'
- - joinTargetNode($later-column, $later-column-Target, $later-column-Node)
- - $earlier-column[Type] = '*scpb.Column'
- - joinOnDescID($later-column, $earlier-column, $table-id)
- - ToPublicOrTransient($later-column-Target, $earlier-column-Target)
- - $status IN [WRITE_ONLY, PUBLIC]
- - $later-column-Node[CurrentStatus] = $status
- - $earlier-column-Node[CurrentStatus] = $status
- - SmallerColumnIDFirst(*scpb.Column, *scpb.Column)($later-column, $earlier-column)
- - joinTargetNode($later-column, $later-column-Target, $later-column-Node)
- - joinTargetNode($earlier-column, $earlier-column-Target, $earlier-column-Node)
-- name: function name should be set before parent ids
- from: function-name-Node
- kind: Precedence
- to: function-parent-Node
- query:
- - $function-name[Type] = '*scpb.FunctionName'
- - $function-parent[Type] = '*scpb.SchemaChild'
- - joinOnDescID($function-name, $function-parent, $function-id)
- - ToPublicOrTransient($function-name-Target, $function-parent-Target)
- - $function-name-Node[CurrentStatus] = PUBLIC
- - $function-parent-Node[CurrentStatus] = PUBLIC
- - joinTargetNode($function-name, $function-name-Target, $function-name-Node)
- - joinTargetNode($function-parent, $function-parent-Target, $function-parent-Node)
-- name: index data exists as soon as index accepts backfills
- from: index-name-Node
- kind: SameStagePrecedence
- to: index-Node
- query:
- - $index-name[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex']
- - $index[Type] = '*scpb.IndexData'
- - joinOnIndexID($index-name, $index, $table-id, $index-id)
- - ToPublicOrTransient($index-name-Target, $index-Target)
- - $index-name-Node[CurrentStatus] = BACKFILL_ONLY
- - $index-Node[CurrentStatus] = PUBLIC
- - joinTargetNode($index-name, $index-name-Target, $index-name-Node)
- - joinTargetNode($index, $index-Target, $index-Node)
-- name: index dependents exist before index becomes public
- from: dependent-Node
- kind: Precedence
- to: index-Node
- query:
- - $dependent[Type] IN ['*scpb.IndexColumn', '*scpb.IndexComment', '*scpb.IndexName', '*scpb.IndexPartitioning', '*scpb.IndexZoneConfig', '*scpb.PartitionZoneConfig', '*scpb.SecondaryIndexPartial']
- - $index[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex', '*scpb.TemporaryIndex']
- - joinOnIndexID($dependent, $index, $table-id, $index-id)
- - ToPublicOrTransient($dependent-Target, $index-Target)
- - $dependent-Node[CurrentStatus] = PUBLIC
- - $index-Node[CurrentStatus] = PUBLIC
- - joinTargetNode($dependent, $dependent-Target, $dependent-Node)
- - joinTargetNode($index, $index-Target, $index-Node)
-- name: index drop mutation visible before cleaning up index columns
- from: index-Node
- kind: Precedence
- to: dependent-Node
- query:
- - $index[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex', '*scpb.TemporaryIndex']
- - $dependent[Type] = '*scpb.IndexColumn'
- - joinOnIndexID($index, $dependent, $table-id, $index-id)
- - toAbsent($index-Target, $dependent-Target)
- - $index-Node[CurrentStatus] = DELETE_ONLY
- - $dependent-Node[CurrentStatus] = ABSENT
- - joinTargetNode($index, $index-Target, $index-Node)
- - joinTargetNode($dependent, $dependent-Target, $dependent-Node)
-- name: index drop mutation visible before cleaning up index columns
- from: index-Node
- kind: Precedence
- to: dependent-Node
- query:
- - $index[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex', '*scpb.TemporaryIndex']
- - $dependent[Type] = '*scpb.IndexColumn'
- - joinOnIndexID($index, $dependent, $table-id, $index-id)
- - transient($index-Target, $dependent-Target)
- - $index-Node[CurrentStatus] = TRANSIENT_DELETE_ONLY
- - $dependent-Node[CurrentStatus] = TRANSIENT_ABSENT
- - joinTargetNode($index, $index-Target, $index-Node)
- - joinTargetNode($dependent, $dependent-Target, $dependent-Node)
-- name: index drop mutation visible before cleaning up index columns
- from: index-Node
- kind: Precedence
- to: dependent-Node
- query:
- - $index[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex', '*scpb.TemporaryIndex']
- - $dependent[Type] = '*scpb.IndexColumn'
- - joinOnIndexID($index, $dependent, $table-id, $index-id)
- - $index-Target[TargetStatus] = TRANSIENT_ABSENT
- - $index-Node[CurrentStatus] = TRANSIENT_DELETE_ONLY
- - $dependent-Target[TargetStatus] = ABSENT
- - $dependent-Node[CurrentStatus] = ABSENT
- - joinTargetNode($index, $index-Target, $index-Node)
- - joinTargetNode($dependent, $dependent-Target, $dependent-Node)
-- name: index drop mutation visible before cleaning up index columns
- from: index-Node
- kind: Precedence
- to: dependent-Node
- query:
- - $index[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex', '*scpb.TemporaryIndex']
- - $dependent[Type] = '*scpb.IndexColumn'
- - joinOnIndexID($index, $dependent, $table-id, $index-id)
- - $index-Target[TargetStatus] = ABSENT
- - $index-Node[CurrentStatus] = DELETE_ONLY
- - $dependent-Target[TargetStatus] = TRANSIENT_ABSENT
- - $dependent-Node[CurrentStatus] = TRANSIENT_ABSENT
- - joinTargetNode($index, $index-Target, $index-Node)
- - joinTargetNode($dependent, $dependent-Target, $dependent-Node)
-- name: index existence precedes index dependents
- from: index-Node
- kind: Precedence
- to: dependent-Node
- query:
- - $index[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex']
- - $dependent[Type] IN ['*scpb.IndexColumn', '*scpb.IndexComment', '*scpb.IndexName', '*scpb.IndexPartitioning', '*scpb.IndexZoneConfig', '*scpb.PartitionZoneConfig', '*scpb.SecondaryIndexPartial']
- - joinOnIndexID($index, $dependent, $table-id, $index-id)
- - ToPublicOrTransient($index-Target, $dependent-Target)
- - $index-Node[CurrentStatus] = BACKFILL_ONLY
- - $dependent-Node[CurrentStatus] = PUBLIC
- - joinTargetNode($index, $index-Target, $index-Node)
- - joinTargetNode($dependent, $dependent-Target, $dependent-Node)
-- name: index is MERGED before its temp index starts to disappear
- from: index-Node
- kind: Precedence
- to: temp-Node
- query:
- - $index[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex']
- - $temp[Type] = '*scpb.TemporaryIndex'
- - joinOnDescID($index, $temp, $table-id)
- - $index[TemporaryIndexID] = $temp-index-id
- - $temp[IndexID] = $temp-index-id
- - $index-Target[TargetStatus] IN [PUBLIC, TRANSIENT_ABSENT]
- - $index-Node[CurrentStatus] = MERGED
- - $temp-Target[TargetStatus] = TRANSIENT_ABSENT
- - $temp-Node[CurrentStatus] = TRANSIENT_DELETE_ONLY
- - joinTargetNode($index, $index-Target, $index-Node)
- - joinTargetNode($temp, $temp-Target, $temp-Node)
-- name: index is ready to be validated before we validate constraint on it
- from: index-Node
- kind: Precedence
- to: constraint-Node
- query:
- - $index[Type] = '*scpb.PrimaryIndex'
- - $constraint[Type] IN ['*scpb.CheckConstraint', '*scpb.ColumnNotNull', '*scpb.ForeignKeyConstraint', '*scpb.UniqueWithoutIndexConstraint']
- - joinOnDescID($index, $constraint, $table-id)
- - $index[IndexID] = $index-id-for-validation
- - $constraint[IndexID] = $index-id-for-validation
- - ToPublicOrTransient($index-Target, $constraint-Target)
- - $index-Node[CurrentStatus] = VALIDATED
- - $constraint-Node[CurrentStatus] = VALIDATED
- - joinTargetNode($index, $index-Target, $index-Node)
- - joinTargetNode($constraint, $constraint-Target, $constraint-Node)
-- name: index no longer public before dependents, excluding columns
- from: index-Node
- kind: Precedence
- to: dependent-Node
- query:
- - $index[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex', '*scpb.TemporaryIndex']
- - $dependent[Type] IN ['*scpb.IndexComment', '*scpb.IndexName', '*scpb.IndexPartitioning', '*scpb.IndexZoneConfig', '*scpb.PartitionZoneConfig', '*scpb.SecondaryIndexPartial']
- - joinOnIndexID($index, $dependent, $table-id, $index-id)
- - toAbsent($index-Target, $dependent-Target)
- - $index-Node[CurrentStatus] = VALIDATED
- - $dependent-Node[CurrentStatus] = ABSENT
- - joinTargetNode($index, $index-Target, $index-Node)
- - joinTargetNode($dependent, $dependent-Target, $dependent-Node)
-- name: index no longer public before dependents, excluding columns
- from: index-Node
- kind: Precedence
- to: dependent-Node
- query:
- - $index[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex', '*scpb.TemporaryIndex']
- - $dependent[Type] IN ['*scpb.IndexComment', '*scpb.IndexName', '*scpb.IndexPartitioning', '*scpb.IndexZoneConfig', '*scpb.PartitionZoneConfig', '*scpb.SecondaryIndexPartial']
- - joinOnIndexID($index, $dependent, $table-id, $index-id)
- - transient($index-Target, $dependent-Target)
- - $index-Node[CurrentStatus] = TRANSIENT_VALIDATED
- - $dependent-Node[CurrentStatus] = TRANSIENT_ABSENT
- - joinTargetNode($index, $index-Target, $index-Node)
- - joinTargetNode($dependent, $dependent-Target, $dependent-Node)
-- name: index no longer public before dependents, excluding columns
- from: index-Node
- kind: Precedence
- to: dependent-Node
- query:
- - $index[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex', '*scpb.TemporaryIndex']
- - $dependent[Type] IN ['*scpb.IndexComment', '*scpb.IndexName', '*scpb.IndexPartitioning', '*scpb.IndexZoneConfig', '*scpb.PartitionZoneConfig', '*scpb.SecondaryIndexPartial']
- - joinOnIndexID($index, $dependent, $table-id, $index-id)
- - $index-Target[TargetStatus] = TRANSIENT_ABSENT
- - $index-Node[CurrentStatus] = TRANSIENT_VALIDATED
- - $dependent-Target[TargetStatus] = ABSENT
- - $dependent-Node[CurrentStatus] = ABSENT
- - joinTargetNode($index, $index-Target, $index-Node)
- - joinTargetNode($dependent, $dependent-Target, $dependent-Node)
-- name: index no longer public before dependents, excluding columns
- from: index-Node
- kind: Precedence
- to: dependent-Node
- query:
- - $index[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex', '*scpb.TemporaryIndex']
- - $dependent[Type] IN ['*scpb.IndexComment', '*scpb.IndexName', '*scpb.IndexPartitioning', '*scpb.IndexZoneConfig', '*scpb.PartitionZoneConfig', '*scpb.SecondaryIndexPartial']
- - joinOnIndexID($index, $dependent, $table-id, $index-id)
- - $index-Target[TargetStatus] = ABSENT
- - $index-Node[CurrentStatus] = VALIDATED
- - $dependent-Target[TargetStatus] = TRANSIENT_ABSENT
- - $dependent-Node[CurrentStatus] = TRANSIENT_ABSENT
- - joinTargetNode($index, $index-Target, $index-Node)
- - joinTargetNode($dependent, $dependent-Target, $dependent-Node)
-- name: index no longer public before index name
- from: index-Node
- kind: Precedence
- to: name-Node
- query:
- - $index[Type] = '*scpb.SecondaryIndex'
- - $name[Type] = '*scpb.IndexName'
- - joinOnIndexID($index, $name, $table-id, $index-id)
- - toAbsent($index-Target, $name-Target)
- - $index-Node[CurrentStatus] = DELETE_ONLY
- - $name-Node[CurrentStatus] = ABSENT
- - joinTargetNode($index, $index-Target, $index-Node)
- - joinTargetNode($name, $name-Target, $name-Node)
-- name: index no longer public before index name
- from: index-Node
- kind: Precedence
- to: name-Node
- query:
- - $index[Type] = '*scpb.SecondaryIndex'
- - $name[Type] = '*scpb.IndexName'
- - joinOnIndexID($index, $name, $table-id, $index-id)
- - transient($index-Target, $name-Target)
- - $index-Node[CurrentStatus] = TRANSIENT_DELETE_ONLY
- - $name-Node[CurrentStatus] = TRANSIENT_ABSENT
- - joinTargetNode($index, $index-Target, $index-Node)
- - joinTargetNode($name, $name-Target, $name-Node)
-- name: index no longer public before index name
- from: index-Node
- kind: Precedence
- to: name-Node
- query:
- - $index[Type] = '*scpb.SecondaryIndex'
- - $name[Type] = '*scpb.IndexName'
- - joinOnIndexID($index, $name, $table-id, $index-id)
- - $index-Target[TargetStatus] = TRANSIENT_ABSENT
- - $index-Node[CurrentStatus] = TRANSIENT_DELETE_ONLY
- - $name-Target[TargetStatus] = ABSENT
- - $name-Node[CurrentStatus] = ABSENT
- - joinTargetNode($index, $index-Target, $index-Node)
- - joinTargetNode($name, $name-Target, $name-Node)
-- name: index no longer public before index name
- from: index-Node
- kind: Precedence
- to: name-Node
- query:
- - $index[Type] = '*scpb.SecondaryIndex'
- - $name[Type] = '*scpb.IndexName'
- - joinOnIndexID($index, $name, $table-id, $index-id)
- - $index-Target[TargetStatus] = ABSENT
- - $index-Node[CurrentStatus] = DELETE_ONLY
- - $name-Target[TargetStatus] = TRANSIENT_ABSENT
- - $name-Node[CurrentStatus] = TRANSIENT_ABSENT
- - joinTargetNode($index, $index-Target, $index-Node)
- - joinTargetNode($name, $name-Target, $name-Node)
-- name: index removed before garbage collection
- from: index-Node
- kind: Precedence
- to: index-data-Node
- query:
- - $index[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex', '*scpb.TemporaryIndex']
- - $index-data[Type] = '*scpb.IndexData'
- - joinOnIndexID($index, $index-data, $table-id, $index-id)
- - toAbsent($index-Target, $index-data-Target)
- - $index-Node[CurrentStatus] = ABSENT
- - $index-data-Node[CurrentStatus] = DROPPED
- - joinTargetNode($index, $index-Target, $index-Node)
- - joinTargetNode($index-data, $index-data-Target, $index-data-Node)
-- name: index removed before garbage collection
- from: index-Node
- kind: Precedence
- to: index-data-Node
- query:
- - $index[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex', '*scpb.TemporaryIndex']
- - $index-data[Type] = '*scpb.IndexData'
- - joinOnIndexID($index, $index-data, $table-id, $index-id)
- - transient($index-Target, $index-data-Target)
- - $index-Node[CurrentStatus] = TRANSIENT_ABSENT
- - $index-data-Node[CurrentStatus] = TRANSIENT_DROPPED
- - joinTargetNode($index, $index-Target, $index-Node)
- - joinTargetNode($index-data, $index-data-Target, $index-data-Node)
-- name: index removed before garbage collection
- from: index-Node
- kind: Precedence
- to: index-data-Node
- query:
- - $index[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex', '*scpb.TemporaryIndex']
- - $index-data[Type] = '*scpb.IndexData'
- - joinOnIndexID($index, $index-data, $table-id, $index-id)
- - $index-Target[TargetStatus] = TRANSIENT_ABSENT
- - $index-Node[CurrentStatus] = TRANSIENT_ABSENT
- - $index-data-Target[TargetStatus] = ABSENT
- - $index-data-Node[CurrentStatus] = DROPPED
- - joinTargetNode($index, $index-Target, $index-Node)
- - joinTargetNode($index-data, $index-data-Target, $index-data-Node)
-- name: index removed before garbage collection
- from: index-Node
- kind: Precedence
- to: index-data-Node
- query:
- - $index[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex', '*scpb.TemporaryIndex']
- - $index-data[Type] = '*scpb.IndexData'
- - joinOnIndexID($index, $index-data, $table-id, $index-id)
- - $index-Target[TargetStatus] = ABSENT
- - $index-Node[CurrentStatus] = ABSENT
- - $index-data-Target[TargetStatus] = TRANSIENT_ABSENT
- - $index-data-Node[CurrentStatus] = TRANSIENT_DROPPED
- - joinTargetNode($index, $index-Target, $index-Node)
- - joinTargetNode($index-data, $index-data-Target, $index-data-Node)
-- name: index-column added to index before index is backfilled
- from: index-column-Node
- kind: Precedence
- to: index-Node
- query:
- - $index-column[Type] = '*scpb.IndexColumn'
- - $index[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex']
- - joinOnIndexID($index-column, $index, $table-id, $index-id)
- - ToPublicOrTransient($index-column-Target, $index-Target)
- - $index-column-Node[CurrentStatus] = PUBLIC
- - $index-Node[CurrentStatus] = BACKFILLED
- - joinTargetNode($index-column, $index-column-Target, $index-column-Node)
- - joinTargetNode($index, $index-Target, $index-Node)
-- name: index-column added to index before temp index receives writes
- from: index-column-Node
- kind: Precedence
- to: index-Node
- query:
- - $index-column[Type] = '*scpb.IndexColumn'
- - $index[Type] = '*scpb.TemporaryIndex'
- - joinOnIndexID($index-column, $index, $table-id, $index-id)
- - transient($index-column-Target, $index-Target)
- - $index-column-Node[CurrentStatus] = PUBLIC
- - $index-Node[CurrentStatus] = WRITE_ONLY
- - joinTargetNode($index-column, $index-column-Target, $index-column-Node)
- - joinTargetNode($index, $index-Target, $index-Node)
-- name: indexes containing column reach absent before column
- from: index-Node
- kind: Precedence
- to: column-Node
- query:
- - $index[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex']
- - $column[Type] = '*scpb.Column'
- - ColumnInIndex($index-column, $index, $table-id, $column-id, $index-id)
- - joinOnColumnID($index-column, $column, $table-id, $column-id)
- - descriptorIsNotBeingDropped-24.3($index-column)
- - toAbsent($index-Target, $column-Target)
- - $index-Node[CurrentStatus] = ABSENT
- - $column-Node[CurrentStatus] = ABSENT
- - joinTargetNode($index, $index-Target, $index-Node)
- - joinTargetNode($column, $column-Target, $column-Node)
-- name: indexes containing column reach absent before column
- from: index-Node
- kind: Precedence
- to: column-Node
- query:
- - $index[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex']
- - $column[Type] = '*scpb.Column'
- - ColumnInIndex($index-column, $index, $table-id, $column-id, $index-id)
- - joinOnColumnID($index-column, $column, $table-id, $column-id)
- - descriptorIsNotBeingDropped-24.3($index-column)
- - transient($index-Target, $column-Target)
- - $index-Node[CurrentStatus] = TRANSIENT_ABSENT
- - $column-Node[CurrentStatus] = TRANSIENT_ABSENT
- - joinTargetNode($index, $index-Target, $index-Node)
- - joinTargetNode($column, $column-Target, $column-Node)
-- name: indexes containing column reach absent before column
- from: index-Node
- kind: Precedence
- to: column-Node
- query:
- - $index[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex']
- - $column[Type] = '*scpb.Column'
- - ColumnInIndex($index-column, $index, $table-id, $column-id, $index-id)
- - joinOnColumnID($index-column, $column, $table-id, $column-id)
- - descriptorIsNotBeingDropped-24.3($index-column)
- - $index-Target[TargetStatus] = TRANSIENT_ABSENT
- - $index-Node[CurrentStatus] = TRANSIENT_ABSENT
- - $column-Target[TargetStatus] = ABSENT
- - $column-Node[CurrentStatus] = ABSENT
- - joinTargetNode($index, $index-Target, $index-Node)
- - joinTargetNode($column, $column-Target, $column-Node)
-- name: indexes containing column reach absent before column
- from: index-Node
- kind: Precedence
- to: column-Node
- query:
- - $index[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex']
- - $column[Type] = '*scpb.Column'
- - ColumnInIndex($index-column, $index, $table-id, $column-id, $index-id)
- - joinOnColumnID($index-column, $column, $table-id, $column-id)
- - descriptorIsNotBeingDropped-24.3($index-column)
- - $index-Target[TargetStatus] = ABSENT
- - $index-Node[CurrentStatus] = ABSENT
- - $column-Target[TargetStatus] = TRANSIENT_ABSENT
- - $column-Node[CurrentStatus] = TRANSIENT_ABSENT
- - joinTargetNode($index, $index-Target, $index-Node)
- - joinTargetNode($column, $column-Target, $column-Node)
-- name: namespace exist before schema parent
- from: dependent-Node
- kind: Precedence
- to: relation-Node
- query:
- - $dependent[Type] = '*scpb.Namespace'
- - $relation[Type] = '*scpb.SchemaParent'
- - joinOnDescID($dependent, $relation, $schema-id)
- - ToPublicOrTransient($dependent-Target, $relation-Target)
- - $dependent-Node[CurrentStatus] = PUBLIC
- - $relation-Node[CurrentStatus] = PUBLIC
- - joinTargetNode($dependent, $dependent-Target, $dependent-Node)
- - joinTargetNode($relation, $relation-Target, $relation-Node)
-- name: non-data dependents removed before descriptor
- from: dependent-Node
- kind: Precedence
- to: descriptor-Node
- query:
- - $dependent[Type] IN ['*scpb.CheckConstraint', '*scpb.CheckConstraintUnvalidated', '*scpb.Column', '*scpb.ColumnComment', '*scpb.ColumnComputeExpression', '*scpb.ColumnDefaultExpression', '*scpb.ColumnFamily', '*scpb.ColumnName', '*scpb.ColumnNotNull', '*scpb.ColumnOnUpdateExpression', '*scpb.ColumnType', '*scpb.CompositeTypeAttrName', '*scpb.CompositeTypeAttrType', '*scpb.ConstraintComment', '*scpb.ConstraintWithoutIndexName', '*scpb.DatabaseComment', '*scpb.DatabaseRegionConfig', '*scpb.DatabaseRoleSetting', '*scpb.DatabaseZoneConfig', '*scpb.EnumTypeValue', '*scpb.ForeignKeyConstraint', '*scpb.ForeignKeyConstraintUnvalidated', '*scpb.FunctionBody', '*scpb.FunctionLeakProof', '*scpb.FunctionName', '*scpb.FunctionNullInputBehavior', '*scpb.FunctionSecurity', '*scpb.FunctionVolatility', '*scpb.IndexColumn', '*scpb.IndexComment', '*scpb.IndexName', '*scpb.IndexPartitioning', '*scpb.IndexZoneConfig', '*scpb.LDRJobIDs', '*scpb.Namespace', '*scpb.Owner', '*scpb.PartitionZoneConfig', '*scpb.PrimaryIndex', '*scpb.RowLevelTTL', '*scpb.SchemaChild', '*scpb.SchemaComment', '*scpb.SchemaParent', '*scpb.SecondaryIndex', '*scpb.SecondaryIndexPartial', '*scpb.SequenceOption', '*scpb.SequenceOwner', '*scpb.TableComment', '*scpb.TableLocalityGlobal', '*scpb.TableLocalityPrimaryRegion', '*scpb.TableLocalityRegionalByRow', '*scpb.TableLocalitySecondaryRegion', '*scpb.TablePartitioning', '*scpb.TableSchemaLocked', '*scpb.TableZoneConfig', '*scpb.TemporaryIndex', '*scpb.Trigger', '*scpb.TriggerDeps', '*scpb.TriggerEnabled', '*scpb.TriggerEvents', '*scpb.TriggerFunctionCall', '*scpb.TriggerName', '*scpb.TriggerTiming', '*scpb.TriggerTransition', '*scpb.TriggerWhen', '*scpb.TypeComment', '*scpb.UniqueWithoutIndexConstraint', '*scpb.UniqueWithoutIndexConstraintUnvalidated', '*scpb.UserPrivileges']
- - $descriptor[Type] IN ['*scpb.AliasType', '*scpb.CompositeType', '*scpb.Database', '*scpb.EnumType', '*scpb.Function', '*scpb.Schema', '*scpb.Sequence', '*scpb.Table', '*scpb.View']
- - joinOnDescID($dependent, $descriptor, $desc-id)
- - toAbsent($dependent-Target, $descriptor-Target)
- - $dependent-Node[CurrentStatus] = ABSENT
- - $descriptor-Node[CurrentStatus] = ABSENT
- - joinTargetNode($dependent, $dependent-Target, $dependent-Node)
- - joinTargetNode($descriptor, $descriptor-Target, $descriptor-Node)
-- name: old index absent before new index public when swapping with transient
- from: old-primary-index-Node
- kind: Precedence
- to: new-primary-index-Node
- query:
- - $old-primary-index[Type] = '*scpb.PrimaryIndex'
- - $transient-primary-index[Type] = '*scpb.PrimaryIndex'
- - $new-primary-index[Type] = '*scpb.PrimaryIndex'
- - joinOnDescID($old-primary-index, $transient-primary-index, $table-id)
- - $old-primary-index[IndexID] = $old-index-id
- - $transient-primary-index[SourceIndexID] = $old-index-id
- - joinOnDescID($transient-primary-index, $new-primary-index, $table-id)
- - $transient-primary-index[IndexID] = $transient-index-id
- - $new-primary-index[SourceIndexID] = $transient-index-id
- - $old-primary-index-Target[TargetStatus] = ABSENT
- - $old-primary-index-Node[CurrentStatus] = ABSENT
- - $new-primary-index-Target[TargetStatus] = PUBLIC
- - $new-primary-index-Node[CurrentStatus] = PUBLIC
- - joinTargetNode($old-primary-index, $old-primary-index-Target, $old-primary-index-Node)
- - joinTargetNode($new-primary-index, $new-primary-index-Target, $new-primary-index-Node)
-- name: partial predicate removed right before secondary index when not dropping relation
- from: partial-predicate-Node
- kind: SameStagePrecedence
- to: index-Node
- query:
- - $partial-predicate[Type] = '*scpb.SecondaryIndexPartial'
- - descriptorIsNotBeingDropped-24.3($partial-predicate)
- - $index[Type] = '*scpb.SecondaryIndex'
- - joinOnIndexID($partial-predicate, $index, $table-id, $index-id)
- - toAbsent($partial-predicate-Target, $index-Target)
- - $partial-predicate-Node[CurrentStatus] = ABSENT
- - $index-Node[CurrentStatus] = ABSENT
- - joinTargetNode($partial-predicate, $partial-predicate-Target, $partial-predicate-Node)
- - joinTargetNode($index, $index-Target, $index-Node)
-- name: partial predicate removed right before secondary index when not dropping relation
- from: partial-predicate-Node
- kind: SameStagePrecedence
- to: index-Node
- query:
- - $partial-predicate[Type] = '*scpb.SecondaryIndexPartial'
- - descriptorIsNotBeingDropped-24.3($partial-predicate)
- - $index[Type] = '*scpb.SecondaryIndex'
- - joinOnIndexID($partial-predicate, $index, $table-id, $index-id)
- - transient($partial-predicate-Target, $index-Target)
- - $partial-predicate-Node[CurrentStatus] = TRANSIENT_ABSENT
- - $index-Node[CurrentStatus] = TRANSIENT_ABSENT
- - joinTargetNode($partial-predicate, $partial-predicate-Target, $partial-predicate-Node)
- - joinTargetNode($index, $index-Target, $index-Node)
-- name: partial predicate removed right before secondary index when not dropping relation
- from: partial-predicate-Node
- kind: SameStagePrecedence
- to: index-Node
- query:
- - $partial-predicate[Type] = '*scpb.SecondaryIndexPartial'
- - descriptorIsNotBeingDropped-24.3($partial-predicate)
- - $index[Type] = '*scpb.SecondaryIndex'
- - joinOnIndexID($partial-predicate, $index, $table-id, $index-id)
- - $partial-predicate-Target[TargetStatus] = TRANSIENT_ABSENT
- - $partial-predicate-Node[CurrentStatus] = TRANSIENT_ABSENT
- - $index-Target[TargetStatus] = ABSENT
- - $index-Node[CurrentStatus] = ABSENT
- - joinTargetNode($partial-predicate, $partial-predicate-Target, $partial-predicate-Node)
- - joinTargetNode($index, $index-Target, $index-Node)
-- name: partial predicate removed right before secondary index when not dropping relation
- from: partial-predicate-Node
- kind: SameStagePrecedence
- to: index-Node
- query:
- - $partial-predicate[Type] = '*scpb.SecondaryIndexPartial'
- - descriptorIsNotBeingDropped-24.3($partial-predicate)
- - $index[Type] = '*scpb.SecondaryIndex'
- - joinOnIndexID($partial-predicate, $index, $table-id, $index-id)
- - $partial-predicate-Target[TargetStatus] = ABSENT
- - $partial-predicate-Node[CurrentStatus] = ABSENT
- - $index-Target[TargetStatus] = TRANSIENT_ABSENT
- - $index-Node[CurrentStatus] = TRANSIENT_ABSENT
- - joinTargetNode($partial-predicate, $partial-predicate-Target, $partial-predicate-Node)
- - joinTargetNode($index, $index-Target, $index-Node)
-- name: primary index named right before index becomes public
- from: index-name-Node
- kind: SameStagePrecedence
- to: index-Node
- query:
- - $index-name[Type] = '*scpb.IndexName'
- - $index[Type] = '*scpb.PrimaryIndex'
- - joinOnIndexID($index-name, $index, $table-id, $index-id)
- - ToPublicOrTransient($index-name-Target, $index-Target)
- - $index-name-Node[CurrentStatus] = PUBLIC
- - $index-Node[CurrentStatus] = PUBLIC
- - joinTargetNode($index-name, $index-name-Target, $index-name-Node)
- - joinTargetNode($index, $index-Target, $index-Node)
-- name: primary index swap
- from: old-index-Node
- kind: SameStagePrecedence
- to: new-index-Node
- query:
- - $old-index[Type] = '*scpb.PrimaryIndex'
- - $new-index[Type] = '*scpb.PrimaryIndex'
- - joinOnDescID($old-index, $new-index, $table-id)
- - $old-index[IndexID] = $old-index-id
- - $new-index[SourceIndexID] = $old-index-id
- - $old-index-Target[TargetStatus] = ABSENT
- - $old-index-Node[CurrentStatus] = VALIDATED
- - $new-index-Target[TargetStatus] IN [PUBLIC, TRANSIENT_ABSENT]
- - $new-index-Node[CurrentStatus] = PUBLIC
- - joinTargetNode($old-index, $old-index-Target, $old-index-Node)
- - joinTargetNode($new-index, $new-index-Target, $new-index-Node)
-- name: primary index swap
- from: old-index-Node
- kind: SameStagePrecedence
- to: new-index-Node
- query:
- - $old-index[Type] = '*scpb.PrimaryIndex'
- - $new-index[Type] = '*scpb.PrimaryIndex'
- - joinOnDescID($old-index, $new-index, $table-id)
- - $old-index[IndexID] = $old-index-id
- - $new-index[SourceIndexID] = $old-index-id
- - $old-index-Target[TargetStatus] = TRANSIENT_ABSENT
- - $old-index-Node[CurrentStatus] = TRANSIENT_VALIDATED
- - $new-index-Target[TargetStatus] IN [PUBLIC, TRANSIENT_ABSENT]
- - $new-index-Node[CurrentStatus] = PUBLIC
- - joinTargetNode($old-index, $old-index-Target, $old-index-Node)
- - joinTargetNode($new-index, $new-index-Target, $new-index-Node)
-- name: primary index swap
- from: new-index-Node
- kind: SameStagePrecedence
- to: old-index-Node
- query:
- - $new-index[Type] = '*scpb.PrimaryIndex'
- - $old-index[Type] = '*scpb.PrimaryIndex'
- - joinOnDescID($new-index, $old-index, $table-id)
- - $new-index[SourceIndexID] = $old-index-id
- - $old-index[IndexID] = $old-index-id
- - $new-index-Target[TargetStatus] = ABSENT
- - $new-index-Node[CurrentStatus] = VALIDATED
- - $old-index-Target[TargetStatus] = PUBLIC
- - $old-index-Node[CurrentStatus] = PUBLIC
- - joinTargetNode($new-index, $new-index-Target, $new-index-Node)
- - joinTargetNode($old-index, $old-index-Target, $old-index-Node)
-- name: primary index with new columns should exist before secondary indexes
- from: primary-index-Node
- kind: Precedence
- to: secondary-index-Node
- query:
- - $primary-index[Type] = '*scpb.PrimaryIndex'
- - $secondary-index[Type] = '*scpb.SecondaryIndex'
- - joinOnDescID($primary-index, $secondary-index, $table-id)
- - $primary-index[IndexID] = $primary-index-id
- - $secondary-index[SourceIndexID] = $primary-index-id
- - ToPublicOrTransient($primary-index-Target, $secondary-index-Target)
- - $primary-index-Node[CurrentStatus] = PUBLIC
- - $secondary-index-Node[CurrentStatus] = BACKFILL_ONLY
- - joinTargetNode($primary-index, $primary-index-Target, $primary-index-Node)
- - joinTargetNode($secondary-index, $secondary-index-Target, $secondary-index-Node)
-- name: primary index with new columns should exist before temp indexes
- from: primary-index-Node
- kind: Precedence
- to: temp-index-Node
- query:
- - $primary-index[Type] = '*scpb.PrimaryIndex'
- - $temp-index[Type] = '*scpb.TemporaryIndex'
- - joinOnDescID($primary-index, $temp-index, $table-id)
- - $primary-index[IndexID] = $primary-index-id
- - $temp-index[SourceIndexID] = $primary-index-id
- - ToPublicOrTransient($primary-index-Target, $temp-index-Target)
- - $primary-index-Node[CurrentStatus] = PUBLIC
- - $temp-index-Node[CurrentStatus] = DELETE_ONLY
- - joinTargetNode($primary-index, $primary-index-Target, $primary-index-Node)
- - joinTargetNode($temp-index, $temp-index-Target, $temp-index-Node)
-- name: relation dropped before dependent column
- from: descriptor-Node
- kind: Precedence
- to: column-Node
- query:
- - $descriptor[Type] IN ['*scpb.Table', '*scpb.View', '*scpb.Sequence']
- - $column[Type] = '*scpb.Column'
- - joinOnDescID($descriptor, $column, $desc-id)
- - toAbsent($descriptor-Target, $column-Target)
- - $descriptor-Node[CurrentStatus] = DROPPED
- - $column-Node[CurrentStatus] = WRITE_ONLY
- - joinTargetNode($descriptor, $descriptor-Target, $descriptor-Node)
- - joinTargetNode($column, $column-Target, $column-Node)
-- name: relation dropped before dependent constraint
- from: descriptor-Node
- kind: Precedence
- to: constraint-Node
- query:
- - $descriptor[Type] = '*scpb.Table'
- - $constraint[Type] = '*scpb.ColumnNotNull'
- - joinOnDescID($descriptor, $constraint, $desc-id)
- - toAbsent($descriptor-Target, $constraint-Target)
- - $descriptor-Node[CurrentStatus] = DROPPED
- - $constraint-Node[CurrentStatus] = VALIDATED
- - joinTargetNode($descriptor, $descriptor-Target, $descriptor-Node)
- - joinTargetNode($constraint, $constraint-Target, $constraint-Node)
-- name: relation dropped before dependent index
- from: descriptor-Node
- kind: Precedence
- to: index-Node
- query:
- - $descriptor[Type] IN ['*scpb.Table', '*scpb.View']
- - $index[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex', '*scpb.TemporaryIndex']
- - joinOnDescID($descriptor, $index, $desc-id)
- - toAbsent($descriptor-Target, $index-Target)
- - $descriptor-Node[CurrentStatus] = DROPPED
- - $index-Node[CurrentStatus] = VALIDATED
- - joinTargetNode($descriptor, $descriptor-Target, $descriptor-Node)
- - joinTargetNode($index, $index-Target, $index-Node)
-- name: remove columns from index right before removing index
- from: index-column-Node
- kind: Precedence
- to: index-Node
- query:
- - $index-column[Type] = '*scpb.IndexColumn'
- - $index[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex', '*scpb.TemporaryIndex']
- - joinOnIndexID($index-column, $index, $table-id, $index-id)
- - toAbsent($index-column-Target, $index-Target)
- - $index-column-Node[CurrentStatus] = DELETE_ONLY
- - $index-Node[CurrentStatus] = ABSENT
- - joinTargetNode($index-column, $index-column-Target, $index-column-Node)
- - joinTargetNode($index, $index-Target, $index-Node)
-- name: remove columns from index right before removing index
- from: index-column-Node
- kind: Precedence
- to: index-Node
- query:
- - $index-column[Type] = '*scpb.IndexColumn'
- - $index[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex', '*scpb.TemporaryIndex']
- - joinOnIndexID($index-column, $index, $table-id, $index-id)
- - transient($index-column-Target, $index-Target)
- - $index-column-Node[CurrentStatus] = TRANSIENT_DELETE_ONLY
- - $index-Node[CurrentStatus] = TRANSIENT_ABSENT
- - joinTargetNode($index-column, $index-column-Target, $index-column-Node)
- - joinTargetNode($index, $index-Target, $index-Node)
-- name: remove columns from index right before removing index
- from: index-column-Node
- kind: Precedence
- to: index-Node
- query:
- - $index-column[Type] = '*scpb.IndexColumn'
- - $index[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex', '*scpb.TemporaryIndex']
- - joinOnIndexID($index-column, $index, $table-id, $index-id)
- - $index-column-Target[TargetStatus] = TRANSIENT_ABSENT
- - $index-column-Node[CurrentStatus] = TRANSIENT_DELETE_ONLY
- - $index-Target[TargetStatus] = ABSENT
- - $index-Node[CurrentStatus] = ABSENT
- - joinTargetNode($index-column, $index-column-Target, $index-column-Node)
- - joinTargetNode($index, $index-Target, $index-Node)
-- name: remove columns from index right before removing index
- from: index-column-Node
- kind: Precedence
- to: index-Node
- query:
- - $index-column[Type] = '*scpb.IndexColumn'
- - $index[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex', '*scpb.TemporaryIndex']
- - joinOnIndexID($index-column, $index, $table-id, $index-id)
- - $index-column-Target[TargetStatus] = ABSENT
- - $index-column-Node[CurrentStatus] = DELETE_ONLY
- - $index-Target[TargetStatus] = TRANSIENT_ABSENT
- - $index-Node[CurrentStatus] = TRANSIENT_ABSENT
- - joinTargetNode($index-column, $index-column-Target, $index-column-Node)
- - joinTargetNode($index, $index-Target, $index-Node)
-- name: replacement secondary index should be validated before the old one becomes invisible
- from: new-index-Node
- kind: Precedence
- to: old-index-Node
- query:
- - $old-index[Type] = '*scpb.SecondaryIndex'
- - $new-index[Type] = '*scpb.SecondaryIndex'
- - $old-index-Target[TargetStatus] = ABSENT
- - $new-index-Target[TargetStatus] IN [PUBLIC, TRANSIENT_ABSENT]
- - joinOnDescID($old-index, $new-index, $table-id)
- - $new-index[IndexID] = $index-id
- - $old-index[IndexID] = $old-index-id
- - $new-index[RecreateSourceIndexID] = $old-index-id
- - joinTargetNode($old-index, $old-index-Target, $old-index-Node)
- - joinTargetNode($new-index, $new-index-Target, $new-index-Node)
- - $new-index-Node[CurrentStatus] = PUBLIC
- - $old-index-Node[CurrentStatus] = VALIDATED
- - joinTargetNode($new-index, $new-index-Target, $new-index-Node)
- - joinTargetNode($old-index, $old-index-Target, $old-index-Node)
-- name: schedule all GC jobs for a descriptor in the same stage
- from: data-a-Node
- kind: SameStagePrecedence
- to: data-b-Node
- query:
- - $data-a[Type] IN ['*scpb.DatabaseData', '*scpb.IndexData', '*scpb.TableData']
- - $data-b[Type] IN ['*scpb.DatabaseData', '*scpb.IndexData', '*scpb.TableData']
- - joinOnDescID($data-a, $data-b, $desc-id)
- - SmallerIDsFirst(scpb.Element, scpb.Element)($data-a, $data-b)
- - toAbsent($data-a-Target, $data-b-Target)
- - $data-a-Node[CurrentStatus] = DROPPED
- - $data-b-Node[CurrentStatus] = DROPPED
- - joinTargetNode($data-a, $data-a-Target, $data-a-Node)
- - joinTargetNode($data-b, $data-b-Target, $data-b-Node)
-- name: schedule all GC jobs for a descriptor in the same stage
- from: data-a-Node
- kind: SameStagePrecedence
- to: data-b-Node
- query:
- - $data-a[Type] IN ['*scpb.DatabaseData', '*scpb.IndexData', '*scpb.TableData']
- - $data-b[Type] IN ['*scpb.DatabaseData', '*scpb.IndexData', '*scpb.TableData']
- - joinOnDescID($data-a, $data-b, $desc-id)
- - SmallerIDsFirst(scpb.Element, scpb.Element)($data-a, $data-b)
- - transient($data-a-Target, $data-b-Target)
- - $data-a-Node[CurrentStatus] = TRANSIENT_DROPPED
- - $data-b-Node[CurrentStatus] = TRANSIENT_DROPPED
- - joinTargetNode($data-a, $data-a-Target, $data-a-Node)
- - joinTargetNode($data-b, $data-b-Target, $data-b-Node)
-- name: schedule all GC jobs for a descriptor in the same stage
- from: data-a-Node
- kind: SameStagePrecedence
- to: data-b-Node
- query:
- - $data-a[Type] IN ['*scpb.DatabaseData', '*scpb.IndexData', '*scpb.TableData']
- - $data-b[Type] IN ['*scpb.DatabaseData', '*scpb.IndexData', '*scpb.TableData']
- - joinOnDescID($data-a, $data-b, $desc-id)
- - SmallerIDsFirst(scpb.Element, scpb.Element)($data-a, $data-b)
- - $data-a-Target[TargetStatus] = TRANSIENT_ABSENT
- - $data-a-Node[CurrentStatus] = TRANSIENT_DROPPED
- - $data-b-Target[TargetStatus] = ABSENT
- - $data-b-Node[CurrentStatus] = DROPPED
- - joinTargetNode($data-a, $data-a-Target, $data-a-Node)
- - joinTargetNode($data-b, $data-b-Target, $data-b-Node)
-- name: schedule all GC jobs for a descriptor in the same stage
- from: data-a-Node
- kind: SameStagePrecedence
- to: data-b-Node
- query:
- - $data-a[Type] IN ['*scpb.DatabaseData', '*scpb.IndexData', '*scpb.TableData']
- - $data-b[Type] IN ['*scpb.DatabaseData', '*scpb.IndexData', '*scpb.TableData']
- - joinOnDescID($data-a, $data-b, $desc-id)
- - SmallerIDsFirst(scpb.Element, scpb.Element)($data-a, $data-b)
- - $data-a-Target[TargetStatus] = ABSENT
- - $data-a-Node[CurrentStatus] = DROPPED
- - $data-b-Target[TargetStatus] = TRANSIENT_ABSENT
- - $data-b-Node[CurrentStatus] = TRANSIENT_DROPPED
- - joinTargetNode($data-a, $data-a-Target, $data-a-Node)
- - joinTargetNode($data-b, $data-b-Target, $data-b-Node)
-- name: secondary index named before public (with index swap)
- from: index-Node
- kind: Precedence
- to: index-name-Node
- query:
- - $index-name[Type] = '*scpb.IndexName'
- - $index[Type] = '*scpb.SecondaryIndex'
- - joinOnIndexID($index, $index-name, $table-id, $index-id)
- - ToPublicOrTransient($index-Target, $index-name-Target)
- - $index-Node[CurrentStatus] = VALIDATED
- - $index-name-Node[CurrentStatus] = PUBLIC
- - $old-index[Type] = '*scpb.SecondaryIndex'
- - $new-index[Type] = '*scpb.SecondaryIndex'
- - $old-index-Target[TargetStatus] = ABSENT
- - $new-index-Target[TargetStatus] IN [PUBLIC, TRANSIENT_ABSENT]
- - joinOnDescID($old-index, $new-index, $table-id)
- - $new-index[IndexID] = $index-id
- - $old-index[IndexID] = $old-index-id
- - $new-index[RecreateSourceIndexID] = $old-index-id
- - joinTargetNode($old-index, $old-index-Target, $old-index-Node)
- - joinTargetNode($new-index, $new-index-Target, $new-index-Node)
- - joinTargetNode($index, $index-Target, $index-Node)
- - joinTargetNode($index-name, $index-name-Target, $index-name-Node)
-- name: secondary index named before validation (without index swap)
- from: index-name-Node
- kind: Precedence
- to: index-Node
- query:
- - $index-name[Type] = '*scpb.IndexName'
- - $index[Type] = '*scpb.SecondaryIndex'
- - joinOnIndexID($index-name, $index, $table-id, $index-id)
- - no secondary index swap is on going($table-id, $index-id)
- - ToPublicOrTransient($index-name-Target, $index-Target)
- - $index-name-Node[CurrentStatus] = PUBLIC
- - $index-Node[CurrentStatus] = VALIDATED
- - joinTargetNode($index-name, $index-name-Target, $index-name-Node)
- - joinTargetNode($index, $index-Target, $index-Node)
-- name: secondary index partial no longer public before referenced column
- from: secondary-partial-index-Node
- kind: Precedence
- to: column-Node
- query:
- - $secondary-partial-index[Type] = '*scpb.SecondaryIndex'
- - $column[Type] = '*scpb.Column'
- - joinOnDescID($secondary-partial-index, $column, $table-id)
- - descriptorIsNotBeingDropped-24.3($secondary-partial-index)
- - secondaryIndexReferencesColumn(*scpb.SecondaryIndex, *scpb.Column)($secondary-partial-index, $column)
- - toAbsent($secondary-partial-index-Target, $column-Target)
- - $secondary-partial-index-Node[CurrentStatus] = DELETE_ONLY
- - $column-Node[CurrentStatus] = WRITE_ONLY
- - joinTargetNode($secondary-partial-index, $secondary-partial-index-Target, $secondary-partial-index-Node)
- - joinTargetNode($column, $column-Target, $column-Node)
-- name: secondary index partial no longer public before referenced column
- from: secondary-partial-index-Node
- kind: Precedence
- to: column-Node
- query:
- - $secondary-partial-index[Type] = '*scpb.SecondaryIndex'
- - $column[Type] = '*scpb.Column'
- - joinOnDescID($secondary-partial-index, $column, $table-id)
- - descriptorIsNotBeingDropped-24.3($secondary-partial-index)
- - secondaryIndexReferencesColumn(*scpb.SecondaryIndex, *scpb.Column)($secondary-partial-index, $column)
- - transient($secondary-partial-index-Target, $column-Target)
- - $secondary-partial-index-Node[CurrentStatus] = TRANSIENT_DELETE_ONLY
- - $column-Node[CurrentStatus] = TRANSIENT_WRITE_ONLY
- - joinTargetNode($secondary-partial-index, $secondary-partial-index-Target, $secondary-partial-index-Node)
- - joinTargetNode($column, $column-Target, $column-Node)
-- name: secondary index partial no longer public before referenced column
- from: secondary-partial-index-Node
- kind: Precedence
- to: column-Node
- query:
- - $secondary-partial-index[Type] = '*scpb.SecondaryIndex'
- - $column[Type] = '*scpb.Column'
- - joinOnDescID($secondary-partial-index, $column, $table-id)
- - descriptorIsNotBeingDropped-24.3($secondary-partial-index)
- - secondaryIndexReferencesColumn(*scpb.SecondaryIndex, *scpb.Column)($secondary-partial-index, $column)
- - $secondary-partial-index-Target[TargetStatus] = TRANSIENT_ABSENT
- - $secondary-partial-index-Node[CurrentStatus] = TRANSIENT_DELETE_ONLY
- - $column-Target[TargetStatus] = ABSENT
- - $column-Node[CurrentStatus] = WRITE_ONLY
- - joinTargetNode($secondary-partial-index, $secondary-partial-index-Target, $secondary-partial-index-Node)
- - joinTargetNode($column, $column-Target, $column-Node)
-- name: secondary index partial no longer public before referenced column
- from: secondary-partial-index-Node
- kind: Precedence
- to: column-Node
- query:
- - $secondary-partial-index[Type] = '*scpb.SecondaryIndex'
- - $column[Type] = '*scpb.Column'
- - joinOnDescID($secondary-partial-index, $column, $table-id)
- - descriptorIsNotBeingDropped-24.3($secondary-partial-index)
- - secondaryIndexReferencesColumn(*scpb.SecondaryIndex, *scpb.Column)($secondary-partial-index, $column)
- - $secondary-partial-index-Target[TargetStatus] = ABSENT
- - $secondary-partial-index-Node[CurrentStatus] = DELETE_ONLY
- - $column-Target[TargetStatus] = TRANSIENT_ABSENT
- - $column-Node[CurrentStatus] = TRANSIENT_WRITE_ONLY
- - joinTargetNode($secondary-partial-index, $secondary-partial-index-Target, $secondary-partial-index-Node)
- - joinTargetNode($column, $column-Target, $column-Node)
-- name: secondary index partial no longer public before referenced column
- from: secondary-partial-index-Node
- kind: Precedence
- to: column-Node
- query:
- - $secondary-partial-index[Type] = '*scpb.SecondaryIndexPartial'
- - $column[Type] = '*scpb.Column'
- - joinOnDescID($secondary-partial-index, $column, $table-id)
- - descriptorIsNotBeingDropped-24.3($secondary-partial-index)
- - secondaryIndexReferencesColumn(*scpb.SecondaryIndexPartial, *scpb.Column)($secondary-partial-index, $column)
- - toAbsent($secondary-partial-index-Target, $column-Target)
- - $secondary-partial-index-Node[CurrentStatus] = ABSENT
- - $column-Node[CurrentStatus] = WRITE_ONLY
- - joinTargetNode($secondary-partial-index, $secondary-partial-index-Target, $secondary-partial-index-Node)
- - joinTargetNode($column, $column-Target, $column-Node)
-- name: secondary index partial no longer public before referenced column
- from: secondary-partial-index-Node
- kind: Precedence
- to: column-Node
- query:
- - $secondary-partial-index[Type] = '*scpb.SecondaryIndexPartial'
- - $column[Type] = '*scpb.Column'
- - joinOnDescID($secondary-partial-index, $column, $table-id)
- - descriptorIsNotBeingDropped-24.3($secondary-partial-index)
- - secondaryIndexReferencesColumn(*scpb.SecondaryIndexPartial, *scpb.Column)($secondary-partial-index, $column)
- - transient($secondary-partial-index-Target, $column-Target)
- - $secondary-partial-index-Node[CurrentStatus] = TRANSIENT_ABSENT
- - $column-Node[CurrentStatus] = TRANSIENT_WRITE_ONLY
- - joinTargetNode($secondary-partial-index, $secondary-partial-index-Target, $secondary-partial-index-Node)
- - joinTargetNode($column, $column-Target, $column-Node)
-- name: secondary index partial no longer public before referenced column
- from: secondary-partial-index-Node
- kind: Precedence
- to: column-Node
- query:
- - $secondary-partial-index[Type] = '*scpb.SecondaryIndexPartial'
- - $column[Type] = '*scpb.Column'
- - joinOnDescID($secondary-partial-index, $column, $table-id)
- - descriptorIsNotBeingDropped-24.3($secondary-partial-index)
- - secondaryIndexReferencesColumn(*scpb.SecondaryIndexPartial, *scpb.Column)($secondary-partial-index, $column)
- - $secondary-partial-index-Target[TargetStatus] = TRANSIENT_ABSENT
- - $secondary-partial-index-Node[CurrentStatus] = TRANSIENT_ABSENT
- - $column-Target[TargetStatus] = ABSENT
- - $column-Node[CurrentStatus] = WRITE_ONLY
- - joinTargetNode($secondary-partial-index, $secondary-partial-index-Target, $secondary-partial-index-Node)
- - joinTargetNode($column, $column-Target, $column-Node)
-- name: secondary index partial no longer public before referenced column
- from: secondary-partial-index-Node
- kind: Precedence
- to: column-Node
- query:
- - $secondary-partial-index[Type] = '*scpb.SecondaryIndexPartial'
- - $column[Type] = '*scpb.Column'
- - joinOnDescID($secondary-partial-index, $column, $table-id)
- - descriptorIsNotBeingDropped-24.3($secondary-partial-index)
- - secondaryIndexReferencesColumn(*scpb.SecondaryIndexPartial, *scpb.Column)($secondary-partial-index, $column)
- - $secondary-partial-index-Target[TargetStatus] = ABSENT
- - $secondary-partial-index-Node[CurrentStatus] = ABSENT
- - $column-Target[TargetStatus] = TRANSIENT_ABSENT
- - $column-Node[CurrentStatus] = TRANSIENT_WRITE_ONLY
- - joinTargetNode($secondary-partial-index, $secondary-partial-index-Target, $secondary-partial-index-Node)
- - joinTargetNode($column, $column-Target, $column-Node)
-- name: secondary index should be validated before dependent view can be absent
- from: index-Node
- kind: Precedence
- to: view-Node
- query:
- - $index[Type] = '*scpb.SecondaryIndex'
- - $view[Type] = '*scpb.View'
- - viewReferencesIndex(*scpb.SecondaryIndex, *scpb.View)($index, $view)
- - toAbsent($index-Target, $view-Target)
- - $index-Node[CurrentStatus] = VALIDATED
- - $view-Node[CurrentStatus] = ABSENT
- - joinTargetNode($index, $index-Target, $index-Node)
- - joinTargetNode($view, $view-Target, $view-Node)
-- name: secondary index should be validated before dependent view can be absent
- from: index-Node
- kind: Precedence
- to: view-Node
- query:
- - $index[Type] = '*scpb.SecondaryIndex'
- - $view[Type] = '*scpb.View'
- - viewReferencesIndex(*scpb.SecondaryIndex, *scpb.View)($index, $view)
- - transient($index-Target, $view-Target)
- - $index-Node[CurrentStatus] = TRANSIENT_VALIDATED
- - $view-Node[CurrentStatus] = TRANSIENT_ABSENT
- - joinTargetNode($index, $index-Target, $index-Node)
- - joinTargetNode($view, $view-Target, $view-Node)
-- name: secondary index should be validated before dependent view can be absent
- from: index-Node
- kind: Precedence
- to: view-Node
- query:
- - $index[Type] = '*scpb.SecondaryIndex'
- - $view[Type] = '*scpb.View'
- - viewReferencesIndex(*scpb.SecondaryIndex, *scpb.View)($index, $view)
- - $index-Target[TargetStatus] = TRANSIENT_ABSENT
- - $index-Node[CurrentStatus] = TRANSIENT_VALIDATED
- - $view-Target[TargetStatus] = ABSENT
- - $view-Node[CurrentStatus] = ABSENT
- - joinTargetNode($index, $index-Target, $index-Node)
- - joinTargetNode($view, $view-Target, $view-Node)
-- name: secondary index should be validated before dependent view can be absent
- from: index-Node
- kind: Precedence
- to: view-Node
- query:
- - $index[Type] = '*scpb.SecondaryIndex'
- - $view[Type] = '*scpb.View'
- - viewReferencesIndex(*scpb.SecondaryIndex, *scpb.View)($index, $view)
- - $index-Target[TargetStatus] = ABSENT
- - $index-Node[CurrentStatus] = VALIDATED
- - $view-Target[TargetStatus] = TRANSIENT_ABSENT
- - $view-Node[CurrentStatus] = TRANSIENT_ABSENT
- - joinTargetNode($index, $index-Target, $index-Node)
- - joinTargetNode($view, $view-Target, $view-Node)
-- name: secondary indexes containing column as key reach write-only before column
- from: index-Node
- kind: Precedence
- to: column-Node
- query:
- - $index[Type] = '*scpb.SecondaryIndex'
- - $column[Type] = '*scpb.Column'
- - ColumnInIndex($index-column, $index, $table-id, $column-id, $index-id)
- - joinOnColumnID($index-column, $column, $table-id, $column-id)
- - toAbsent($index-Target, $column-Target)
- - $index-Node[CurrentStatus] = VALIDATED
- - $column-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-24.3($index-column)
- - isIndexKeyColumnKey(*scpb.IndexColumn)($index-column)
- - joinTargetNode($index, $index-Target, $index-Node)
- - joinTargetNode($column, $column-Target, $column-Node)
-- name: simple constraint public right before its dependents
- from: simple-constraint-Node
- kind: SameStagePrecedence
- to: dependent-Node
- query:
- - $simple-constraint[Type] = '*scpb.ColumnNotNull'
- - $dependent[Type] IN ['*scpb.ConstraintComment', '*scpb.ConstraintWithoutIndexName']
- - joinOnConstraintID($simple-constraint, $dependent, $table-id, $constraint-id)
- - ToPublicOrTransient($simple-constraint-Target, $dependent-Target)
- - $simple-constraint-Node[CurrentStatus] = PUBLIC
- - $dependent-Node[CurrentStatus] = PUBLIC
- - joinTargetNode($simple-constraint, $simple-constraint-Target, $simple-constraint-Node)
- - joinTargetNode($dependent, $dependent-Target, $dependent-Node)
-- name: simple constraint visible before name
- from: simple-constraint-Node
- kind: Precedence
- to: constraint-name-Node
- query:
- - $simple-constraint[Type] IN ['*scpb.CheckConstraint', '*scpb.CheckConstraintUnvalidated', '*scpb.ColumnNotNull', '*scpb.ForeignKeyConstraint', '*scpb.ForeignKeyConstraintUnvalidated', '*scpb.UniqueWithoutIndexConstraint', '*scpb.UniqueWithoutIndexConstraintUnvalidated']
- - $constraint-name[Type] = '*scpb.ConstraintWithoutIndexName'
- - joinOnConstraintID($simple-constraint, $constraint-name, $table-id, $constraint-id)
- - ToPublicOrTransient($simple-constraint-Target, $constraint-name-Target)
- - $simple-constraint-Node[CurrentStatus] = WRITE_ONLY
- - $constraint-name-Node[CurrentStatus] = PUBLIC
- - joinTargetNode($simple-constraint, $simple-constraint-Target, $simple-constraint-Node)
- - joinTargetNode($constraint-name, $constraint-name-Target, $constraint-name-Node)
-- name: swapped primary index public before column
- from: index-Node
- kind: Precedence
- to: column-Node
- query:
- - $index[Type] = '*scpb.PrimaryIndex'
- - $column[Type] = '*scpb.Column'
- - ColumnInSwappedInPrimaryIndex($index-column, $index, $table-id, $column-id, $index-id)
- - joinOnColumnID($index-column, $column, $table-id, $column-id)
- - ToPublicOrTransient($index-Target, $column-Target)
- - $index-Node[CurrentStatus] = PUBLIC
- - $column-Node[CurrentStatus] = PUBLIC
- - joinTargetNode($index, $index-Target, $index-Node)
- - joinTargetNode($column, $column-Target, $column-Node)
-- name: table added right before data element
- from: table-Node
- kind: Precedence
- to: data-Node
- query:
- - $table[Type] IN ['*scpb.AliasType', '*scpb.CompositeType', '*scpb.Database', '*scpb.EnumType', '*scpb.Function', '*scpb.Schema', '*scpb.Sequence', '*scpb.Table', '*scpb.View']
- - $data[Type] IN ['*scpb.DatabaseData', '*scpb.IndexData', '*scpb.TableData']
- - joinOnDescID($table, $data, $table-id)
- - ToPublicOrTransient($table-Target, $data-Target)
- - $table-Node[CurrentStatus] = PUBLIC
- - $data-Node[CurrentStatus] = PUBLIC
- - joinTargetNode($table, $table-Target, $table-Node)
- - joinTargetNode($data, $data-Target, $data-Node)
-- name: table removed right before garbage collection
- from: table-Node
- kind: SameStagePrecedence
- to: data-Node
- query:
- - $table[Type] IN ['*scpb.AliasType', '*scpb.CompositeType', '*scpb.Database', '*scpb.EnumType', '*scpb.Function', '*scpb.Schema', '*scpb.Sequence', '*scpb.Table', '*scpb.View']
- - $data[Type] = '*scpb.TableData'
- - joinOnDescID($table, $data, $table-id)
- - toAbsent($table-Target, $data-Target)
- - $table-Node[CurrentStatus] = ABSENT
- - $data-Node[CurrentStatus] = DROPPED
- - joinTargetNode($table, $table-Target, $table-Node)
- - joinTargetNode($data, $data-Target, $data-Node)
-- name: temp index data exists as soon as temp index accepts writes
- from: temp-index-Node
- kind: SameStagePrecedence
- to: temp-index-data-Node
- query:
- - $temp-index[Type] = '*scpb.TemporaryIndex'
- - $temp-index-data[Type] = '*scpb.IndexData'
- - joinOnIndexID($temp-index, $temp-index-data, $table-id, $index-id)
- - ToPublicOrTransient($temp-index-Target, $temp-index-data-Target)
- - $temp-index-Node[CurrentStatus] = WRITE_ONLY
- - $temp-index-data-Node[CurrentStatus] = PUBLIC
- - joinTargetNode($temp-index, $temp-index-Target, $temp-index-Node)
- - joinTargetNode($temp-index-data, $temp-index-data-Target, $temp-index-data-Node)
-- name: temp index disappeared before its master index reaches WRITE_ONLY
- from: temp-Node
- kind: Precedence
- to: index-Node
- query:
- - $temp[Type] = '*scpb.TemporaryIndex'
- - $index[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex']
- - joinOnDescID($temp, $index, $table-id)
- - $temp[IndexID] = $temp-index-id
- - $index[TemporaryIndexID] = $temp-index-id
- - $temp-Target[TargetStatus] = TRANSIENT_ABSENT
- - $temp-Node[CurrentStatus] = TRANSIENT_DELETE_ONLY
- - $index-Target[TargetStatus] IN [PUBLIC, TRANSIENT_ABSENT]
- - $index-Node[CurrentStatus] = WRITE_ONLY
- - joinTargetNode($temp, $temp-Target, $temp-Node)
- - joinTargetNode($index, $index-Target, $index-Node)
-- name: temp index existence precedes index dependents
- from: index-Node
- kind: Precedence
- to: dependent-Node
- query:
- - $index[Type] = '*scpb.TemporaryIndex'
- - $dependent[Type] IN ['*scpb.IndexColumn', '*scpb.IndexComment', '*scpb.IndexName', '*scpb.IndexPartitioning', '*scpb.IndexZoneConfig', '*scpb.PartitionZoneConfig', '*scpb.SecondaryIndexPartial']
- - joinOnIndexID($index, $dependent, $table-id, $index-id)
- - ToPublicOrTransient($index-Target, $dependent-Target)
- - $index-Node[CurrentStatus] = DELETE_ONLY
- - $dependent-Node[CurrentStatus] = PUBLIC
- - joinTargetNode($index, $index-Target, $index-Node)
- - joinTargetNode($dependent, $dependent-Target, $dependent-Node)
-- name: temp index is WRITE_ONLY before backfill
- from: temp-Node
- kind: Precedence
- to: index-Node
- query:
- - $temp[Type] = '*scpb.TemporaryIndex'
- - $index[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex']
- - joinOnDescID($temp, $index, $table-id)
- - $temp[IndexID] = $temp-index-id
- - $index[TemporaryIndexID] = $temp-index-id
- - $temp-Target[TargetStatus] = TRANSIENT_ABSENT
- - $index-Target[TargetStatus] IN [PUBLIC, TRANSIENT_ABSENT]
- - $temp-Node[CurrentStatus] = WRITE_ONLY
- - $index-Node[CurrentStatus] = BACKFILLED
- - joinTargetNode($temp, $temp-Target, $temp-Node)
- - joinTargetNode($index, $index-Target, $index-Node)
-
-deprules
-----
-- name: 'CheckConstraint transitions to ABSENT uphold 2-version invariant: PUBLIC->VALIDATED'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.CheckConstraint'
- - $next[Type] = '*scpb.CheckConstraint'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = PUBLIC
- - $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'CheckConstraint transitions to ABSENT uphold 2-version invariant: TRANSIENT_ABSENT->ABSENT'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.CheckConstraint'
- - $next[Type] = '*scpb.CheckConstraint'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = TRANSIENT_ABSENT
- - $next-Node[CurrentStatus] = ABSENT
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'CheckConstraint transitions to ABSENT uphold 2-version invariant: TRANSIENT_VALIDATED->VALIDATED'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.CheckConstraint'
- - $next[Type] = '*scpb.CheckConstraint'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = TRANSIENT_VALIDATED
- - $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'CheckConstraint transitions to ABSENT uphold 2-version invariant: TRANSIENT_WRITE_ONLY->VALIDATED'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.CheckConstraint'
- - $next[Type] = '*scpb.CheckConstraint'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = TRANSIENT_WRITE_ONLY
- - $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'CheckConstraint transitions to ABSENT uphold 2-version invariant: VALIDATED->ABSENT'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.CheckConstraint'
- - $next[Type] = '*scpb.CheckConstraint'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = VALIDATED
- - $next-Node[CurrentStatus] = ABSENT
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - nodeNotExistsWithStatusIn_TRANSIENT_VALIDATED_WRITE_ONLY_TRANSIENT_WRITE_ONLY($prev-Target)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'CheckConstraint transitions to ABSENT uphold 2-version invariant: WRITE_ONLY->VALIDATED'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.CheckConstraint'
- - $next[Type] = '*scpb.CheckConstraint'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = WRITE_ONLY
- - $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'CheckConstraint transitions to PUBLIC uphold 2-version invariant: ABSENT->WRITE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.CheckConstraint'
- - $next[Type] = '*scpb.CheckConstraint'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = PUBLIC
- - $prev-Node[CurrentStatus] = ABSENT
- - $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'CheckConstraint transitions to PUBLIC uphold 2-version invariant: VALIDATED->PUBLIC'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.CheckConstraint'
- - $next[Type] = '*scpb.CheckConstraint'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = PUBLIC
- - $prev-Node[CurrentStatus] = VALIDATED
- - $next-Node[CurrentStatus] = PUBLIC
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'CheckConstraint transitions to PUBLIC uphold 2-version invariant: WRITE_ONLY->VALIDATED'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.CheckConstraint'
- - $next[Type] = '*scpb.CheckConstraint'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = PUBLIC
- - $prev-Node[CurrentStatus] = WRITE_ONLY
- - $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'CheckConstraint transitions to TRANSIENT_ABSENT uphold 2-version invariant: ABSENT->WRITE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.CheckConstraint'
- - $next[Type] = '*scpb.CheckConstraint'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- - $prev-Node[CurrentStatus] = ABSENT
- - $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'CheckConstraint transitions to TRANSIENT_ABSENT uphold 2-version invariant: PUBLIC->TRANSIENT_VALIDATED'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.CheckConstraint'
- - $next[Type] = '*scpb.CheckConstraint'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- - $prev-Node[CurrentStatus] = PUBLIC
- - $next-Node[CurrentStatus] = TRANSIENT_VALIDATED
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'CheckConstraint transitions to TRANSIENT_ABSENT uphold 2-version invariant: TRANSIENT_VALIDATED->TRANSIENT_ABSENT'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.CheckConstraint'
- - $next[Type] = '*scpb.CheckConstraint'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- - $prev-Node[CurrentStatus] = TRANSIENT_VALIDATED
- - $next-Node[CurrentStatus] = TRANSIENT_ABSENT
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - nodeNotExistsWithStatusIn_TRANSIENT_WRITE_ONLY($prev-Target)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'CheckConstraint transitions to TRANSIENT_ABSENT uphold 2-version invariant: TRANSIENT_WRITE_ONLY->TRANSIENT_VALIDATED'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.CheckConstraint'
- - $next[Type] = '*scpb.CheckConstraint'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- - $prev-Node[CurrentStatus] = TRANSIENT_WRITE_ONLY
- - $next-Node[CurrentStatus] = TRANSIENT_VALIDATED
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'CheckConstraint transitions to TRANSIENT_ABSENT uphold 2-version invariant: VALIDATED->PUBLIC'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.CheckConstraint'
- - $next[Type] = '*scpb.CheckConstraint'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- - $prev-Node[CurrentStatus] = VALIDATED
- - $next-Node[CurrentStatus] = PUBLIC
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'CheckConstraint transitions to TRANSIENT_ABSENT uphold 2-version invariant: WRITE_ONLY->VALIDATED'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.CheckConstraint'
- - $next[Type] = '*scpb.CheckConstraint'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- - $prev-Node[CurrentStatus] = WRITE_ONLY
- - $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'Column transitions to ABSENT uphold 2-version invariant: DELETE_ONLY->ABSENT'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.Column'
- - $next[Type] = '*scpb.Column'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = DELETE_ONLY
- - $next-Node[CurrentStatus] = ABSENT
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'Column transitions to ABSENT uphold 2-version invariant: PUBLIC->WRITE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.Column'
- - $next[Type] = '*scpb.Column'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = PUBLIC
- - $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'Column transitions to ABSENT uphold 2-version invariant: WRITE_ONLY->DELETE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.Column'
- - $next[Type] = '*scpb.Column'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = WRITE_ONLY
- - $next-Node[CurrentStatus] = DELETE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'Column transitions to PUBLIC uphold 2-version invariant: ABSENT->DELETE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.Column'
- - $next[Type] = '*scpb.Column'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = PUBLIC
- - $prev-Node[CurrentStatus] = ABSENT
- - $next-Node[CurrentStatus] = DELETE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'Column transitions to PUBLIC uphold 2-version invariant: DELETE_ONLY->WRITE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.Column'
- - $next[Type] = '*scpb.Column'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = PUBLIC
- - $prev-Node[CurrentStatus] = DELETE_ONLY
- - $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'Column transitions to PUBLIC uphold 2-version invariant: WRITE_ONLY->PUBLIC'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.Column'
- - $next[Type] = '*scpb.Column'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = PUBLIC
- - $prev-Node[CurrentStatus] = WRITE_ONLY
- - $next-Node[CurrentStatus] = PUBLIC
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'ColumnNotNull transitions to ABSENT uphold 2-version invariant: PUBLIC->VALIDATED'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.ColumnNotNull'
- - $next[Type] = '*scpb.ColumnNotNull'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = PUBLIC
- - $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'ColumnNotNull transitions to ABSENT uphold 2-version invariant: VALIDATED->ABSENT'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.ColumnNotNull'
- - $next[Type] = '*scpb.ColumnNotNull'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = VALIDATED
- - $next-Node[CurrentStatus] = ABSENT
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - nodeNotExistsWithStatusIn_WRITE_ONLY($prev-Target)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'ColumnNotNull transitions to ABSENT uphold 2-version invariant: WRITE_ONLY->VALIDATED'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.ColumnNotNull'
- - $next[Type] = '*scpb.ColumnNotNull'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = WRITE_ONLY
- - $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'ColumnNotNull transitions to PUBLIC uphold 2-version invariant: ABSENT->WRITE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.ColumnNotNull'
- - $next[Type] = '*scpb.ColumnNotNull'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = PUBLIC
- - $prev-Node[CurrentStatus] = ABSENT
- - $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'ColumnNotNull transitions to PUBLIC uphold 2-version invariant: VALIDATED->PUBLIC'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.ColumnNotNull'
- - $next[Type] = '*scpb.ColumnNotNull'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = PUBLIC
- - $prev-Node[CurrentStatus] = VALIDATED
- - $next-Node[CurrentStatus] = PUBLIC
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'ColumnNotNull transitions to PUBLIC uphold 2-version invariant: WRITE_ONLY->VALIDATED'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.ColumnNotNull'
- - $next[Type] = '*scpb.ColumnNotNull'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = PUBLIC
- - $prev-Node[CurrentStatus] = WRITE_ONLY
- - $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: Computed column expression is dropped before the column it depends on
- from: column-expr-Node
- kind: Precedence
- to: column-Node
- query:
- - $column-expr[Type] = '*scpb.ColumnComputeExpression'
- - $column[Type] = '*scpb.Column'
- - joinOnDescID($column-expr, $column, $table-id)
- - computedColumnTypeReferencesColumn(*scpb.ColumnComputeExpression, *scpb.Column)($column-expr, $column)
- - toAbsent($column-expr-Target, $column-Target)
- - $column-expr-Node[CurrentStatus] = ABSENT
- - $column-Node[CurrentStatus] = WRITE_ONLY
- - joinTargetNode($column-expr, $column-expr-Target, $column-expr-Node)
- - joinTargetNode($column, $column-Target, $column-Node)
-- name: Computed column expression is dropped before the column it depends on
- from: column-expr-Node
- kind: Precedence
- to: column-Node
- query:
- - $column-expr[Type] = '*scpb.ColumnComputeExpression'
- - $column[Type] = '*scpb.Column'
- - joinOnDescID($column-expr, $column, $table-id)
- - computedColumnTypeReferencesColumn(*scpb.ColumnComputeExpression, *scpb.Column)($column-expr, $column)
- - transient($column-expr-Target, $column-Target)
- - $column-expr-Node[CurrentStatus] = TRANSIENT_ABSENT
- - $column-Node[CurrentStatus] = TRANSIENT_WRITE_ONLY
- - joinTargetNode($column-expr, $column-expr-Target, $column-expr-Node)
- - joinTargetNode($column, $column-Target, $column-Node)
-- name: Computed column expression is dropped before the column it depends on
- from: column-expr-Node
- kind: Precedence
- to: column-Node
- query:
- - $column-expr[Type] = '*scpb.ColumnComputeExpression'
- - $column[Type] = '*scpb.Column'
- - joinOnDescID($column-expr, $column, $table-id)
- - computedColumnTypeReferencesColumn(*scpb.ColumnComputeExpression, *scpb.Column)($column-expr, $column)
- - $column-expr-Target[TargetStatus] = TRANSIENT_ABSENT
- - $column-expr-Node[CurrentStatus] = TRANSIENT_ABSENT
- - $column-Target[TargetStatus] = ABSENT
- - $column-Node[CurrentStatus] = WRITE_ONLY
- - joinTargetNode($column-expr, $column-expr-Target, $column-expr-Node)
- - joinTargetNode($column, $column-Target, $column-Node)
-- name: Computed column expression is dropped before the column it depends on
- from: column-expr-Node
- kind: Precedence
- to: column-Node
- query:
- - $column-expr[Type] = '*scpb.ColumnComputeExpression'
- - $column[Type] = '*scpb.Column'
- - joinOnDescID($column-expr, $column, $table-id)
- - computedColumnTypeReferencesColumn(*scpb.ColumnComputeExpression, *scpb.Column)($column-expr, $column)
- - $column-expr-Target[TargetStatus] = ABSENT
- - $column-expr-Node[CurrentStatus] = ABSENT
- - $column-Target[TargetStatus] = TRANSIENT_ABSENT
- - $column-Node[CurrentStatus] = TRANSIENT_WRITE_ONLY
- - joinTargetNode($column-expr, $column-expr-Target, $column-expr-Node)
- - joinTargetNode($column, $column-Target, $column-Node)
-- name: Constraint should be hidden before name
- from: constraint-name-Node
- kind: Precedence
- to: constraint-Node
- query:
- - $constraint-name[Type] = '*scpb.ConstraintWithoutIndexName'
- - $constraint[Type] IN ['*scpb.CheckConstraint', '*scpb.CheckConstraintUnvalidated', '*scpb.ColumnNotNull', '*scpb.ForeignKeyConstraint', '*scpb.ForeignKeyConstraintUnvalidated', '*scpb.UniqueWithoutIndexConstraint', '*scpb.UniqueWithoutIndexConstraintUnvalidated']
- - joinOnConstraintID($constraint-name, $constraint, $table-id, $constraint-id)
- - toAbsent($constraint-name-Target, $constraint-Target)
- - $constraint-name-Node[CurrentStatus] = ABSENT
- - $constraint-Node[CurrentStatus] = ABSENT
- - joinTargetNode($constraint-name, $constraint-name-Target, $constraint-name-Node)
- - joinTargetNode($constraint, $constraint-Target, $constraint-Node)
-- name: Constraint should be hidden before name
- from: constraint-name-Node
- kind: Precedence
- to: constraint-Node
- query:
- - $constraint-name[Type] = '*scpb.ConstraintWithoutIndexName'
- - $constraint[Type] IN ['*scpb.CheckConstraint', '*scpb.CheckConstraintUnvalidated', '*scpb.ColumnNotNull', '*scpb.ForeignKeyConstraint', '*scpb.ForeignKeyConstraintUnvalidated', '*scpb.UniqueWithoutIndexConstraint', '*scpb.UniqueWithoutIndexConstraintUnvalidated']
- - joinOnConstraintID($constraint-name, $constraint, $table-id, $constraint-id)
- - transient($constraint-name-Target, $constraint-Target)
- - $constraint-name-Node[CurrentStatus] = TRANSIENT_ABSENT
- - $constraint-Node[CurrentStatus] = TRANSIENT_ABSENT
- - joinTargetNode($constraint-name, $constraint-name-Target, $constraint-name-Node)
- - joinTargetNode($constraint, $constraint-Target, $constraint-Node)
-- name: Constraint should be hidden before name
- from: constraint-name-Node
- kind: Precedence
- to: constraint-Node
- query:
- - $constraint-name[Type] = '*scpb.ConstraintWithoutIndexName'
- - $constraint[Type] IN ['*scpb.CheckConstraint', '*scpb.CheckConstraintUnvalidated', '*scpb.ColumnNotNull', '*scpb.ForeignKeyConstraint', '*scpb.ForeignKeyConstraintUnvalidated', '*scpb.UniqueWithoutIndexConstraint', '*scpb.UniqueWithoutIndexConstraintUnvalidated']
- - joinOnConstraintID($constraint-name, $constraint, $table-id, $constraint-id)
- - $constraint-name-Target[TargetStatus] = TRANSIENT_ABSENT
- - $constraint-name-Node[CurrentStatus] = TRANSIENT_ABSENT
- - $constraint-Target[TargetStatus] = ABSENT
- - $constraint-Node[CurrentStatus] = ABSENT
- - joinTargetNode($constraint-name, $constraint-name-Target, $constraint-name-Node)
- - joinTargetNode($constraint, $constraint-Target, $constraint-Node)
-- name: Constraint should be hidden before name
- from: constraint-name-Node
- kind: Precedence
- to: constraint-Node
- query:
- - $constraint-name[Type] = '*scpb.ConstraintWithoutIndexName'
- - $constraint[Type] IN ['*scpb.CheckConstraint', '*scpb.CheckConstraintUnvalidated', '*scpb.ColumnNotNull', '*scpb.ForeignKeyConstraint', '*scpb.ForeignKeyConstraintUnvalidated', '*scpb.UniqueWithoutIndexConstraint', '*scpb.UniqueWithoutIndexConstraintUnvalidated']
- - joinOnConstraintID($constraint-name, $constraint, $table-id, $constraint-id)
- - $constraint-name-Target[TargetStatus] = ABSENT
- - $constraint-name-Node[CurrentStatus] = ABSENT
- - $constraint-Target[TargetStatus] = TRANSIENT_ABSENT
- - $constraint-Node[CurrentStatus] = TRANSIENT_ABSENT
- - joinTargetNode($constraint-name, $constraint-name-Target, $constraint-name-Node)
- - joinTargetNode($constraint, $constraint-Target, $constraint-Node)
-- name: Constraint should be hidden before name
- from: constraint-Node
- kind: Precedence
- to: constraint-name-Node
- query:
- - $constraint[Type] IN ['*scpb.CheckConstraint', '*scpb.CheckConstraintUnvalidated', '*scpb.ColumnNotNull', '*scpb.ForeignKeyConstraint', '*scpb.ForeignKeyConstraintUnvalidated', '*scpb.UniqueWithoutIndexConstraint', '*scpb.UniqueWithoutIndexConstraintUnvalidated']
- - $constraint-name[Type] = '*scpb.ConstraintWithoutIndexName'
- - joinOnConstraintID($constraint, $constraint-name, $table-id, $constraint-id)
- - toAbsent($constraint-Target, $constraint-name-Target)
- - $constraint-Node[CurrentStatus] = VALIDATED
- - $constraint-name-Node[CurrentStatus] = ABSENT
- - joinTargetNode($constraint, $constraint-Target, $constraint-Node)
- - joinTargetNode($constraint-name, $constraint-name-Target, $constraint-name-Node)
-- name: Constraint should be hidden before name
- from: constraint-Node
- kind: Precedence
- to: constraint-name-Node
- query:
- - $constraint[Type] IN ['*scpb.CheckConstraint', '*scpb.CheckConstraintUnvalidated', '*scpb.ColumnNotNull', '*scpb.ForeignKeyConstraint', '*scpb.ForeignKeyConstraintUnvalidated', '*scpb.UniqueWithoutIndexConstraint', '*scpb.UniqueWithoutIndexConstraintUnvalidated']
- - $constraint-name[Type] = '*scpb.ConstraintWithoutIndexName'
- - joinOnConstraintID($constraint, $constraint-name, $table-id, $constraint-id)
- - transient($constraint-Target, $constraint-name-Target)
- - $constraint-Node[CurrentStatus] = TRANSIENT_VALIDATED
- - $constraint-name-Node[CurrentStatus] = TRANSIENT_ABSENT
- - joinTargetNode($constraint, $constraint-Target, $constraint-Node)
- - joinTargetNode($constraint-name, $constraint-name-Target, $constraint-name-Node)
-- name: Constraint should be hidden before name
- from: constraint-Node
- kind: Precedence
- to: constraint-name-Node
- query:
- - $constraint[Type] IN ['*scpb.CheckConstraint', '*scpb.CheckConstraintUnvalidated', '*scpb.ColumnNotNull', '*scpb.ForeignKeyConstraint', '*scpb.ForeignKeyConstraintUnvalidated', '*scpb.UniqueWithoutIndexConstraint', '*scpb.UniqueWithoutIndexConstraintUnvalidated']
- - $constraint-name[Type] = '*scpb.ConstraintWithoutIndexName'
- - joinOnConstraintID($constraint, $constraint-name, $table-id, $constraint-id)
- - $constraint-Target[TargetStatus] = TRANSIENT_ABSENT
- - $constraint-Node[CurrentStatus] = TRANSIENT_VALIDATED
- - $constraint-name-Target[TargetStatus] = ABSENT
- - $constraint-name-Node[CurrentStatus] = ABSENT
- - joinTargetNode($constraint, $constraint-Target, $constraint-Node)
- - joinTargetNode($constraint-name, $constraint-name-Target, $constraint-name-Node)
-- name: Constraint should be hidden before name
- from: constraint-Node
- kind: Precedence
- to: constraint-name-Node
- query:
- - $constraint[Type] IN ['*scpb.CheckConstraint', '*scpb.CheckConstraintUnvalidated', '*scpb.ColumnNotNull', '*scpb.ForeignKeyConstraint', '*scpb.ForeignKeyConstraintUnvalidated', '*scpb.UniqueWithoutIndexConstraint', '*scpb.UniqueWithoutIndexConstraintUnvalidated']
- - $constraint-name[Type] = '*scpb.ConstraintWithoutIndexName'
- - joinOnConstraintID($constraint, $constraint-name, $table-id, $constraint-id)
- - $constraint-Target[TargetStatus] = ABSENT
- - $constraint-Node[CurrentStatus] = VALIDATED
- - $constraint-name-Target[TargetStatus] = TRANSIENT_ABSENT
- - $constraint-name-Node[CurrentStatus] = TRANSIENT_ABSENT
- - joinTargetNode($constraint, $constraint-Target, $constraint-Node)
- - joinTargetNode($constraint-name, $constraint-name-Target, $constraint-name-Node)
-- name: DEFAULT or ON UPDATE existence precedes writes to column
- from: expr-Node
- kind: Precedence
- to: column-Node
- query:
- - $expr[Type] IN ['*scpb.ColumnDefaultExpression', '*scpb.ColumnOnUpdateExpression']
- - $column[Type] = '*scpb.Column'
- - joinOnColumnID($expr, $column, $table-id, $col-id)
- - ToPublicOrTransient($expr-Target, $column-Target)
- - $expr-Node[CurrentStatus] = PUBLIC
- - $column-Node[CurrentStatus] = WRITE_ONLY
- - joinTargetNode($expr, $expr-Target, $expr-Node)
- - joinTargetNode($column, $column-Target, $column-Node)
-- name: 'ForeignKeyConstraint transitions to ABSENT uphold 2-version invariant: PUBLIC->VALIDATED'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.ForeignKeyConstraint'
- - $next[Type] = '*scpb.ForeignKeyConstraint'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = PUBLIC
- - $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'ForeignKeyConstraint transitions to ABSENT uphold 2-version invariant: VALIDATED->ABSENT'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.ForeignKeyConstraint'
- - $next[Type] = '*scpb.ForeignKeyConstraint'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = VALIDATED
- - $next-Node[CurrentStatus] = ABSENT
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - nodeNotExistsWithStatusIn_WRITE_ONLY($prev-Target)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'ForeignKeyConstraint transitions to ABSENT uphold 2-version invariant: WRITE_ONLY->VALIDATED'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.ForeignKeyConstraint'
- - $next[Type] = '*scpb.ForeignKeyConstraint'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = WRITE_ONLY
- - $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'ForeignKeyConstraint transitions to PUBLIC uphold 2-version invariant: ABSENT->WRITE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.ForeignKeyConstraint'
- - $next[Type] = '*scpb.ForeignKeyConstraint'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = PUBLIC
- - $prev-Node[CurrentStatus] = ABSENT
- - $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'ForeignKeyConstraint transitions to PUBLIC uphold 2-version invariant: VALIDATED->PUBLIC'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.ForeignKeyConstraint'
- - $next[Type] = '*scpb.ForeignKeyConstraint'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = PUBLIC
- - $prev-Node[CurrentStatus] = VALIDATED
- - $next-Node[CurrentStatus] = PUBLIC
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'ForeignKeyConstraint transitions to PUBLIC uphold 2-version invariant: WRITE_ONLY->VALIDATED'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.ForeignKeyConstraint'
- - $next[Type] = '*scpb.ForeignKeyConstraint'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = PUBLIC
- - $prev-Node[CurrentStatus] = WRITE_ONLY
- - $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: New primary index should go public only after columns being dropped move to WRITE_ONLY
- from: column-Node
- kind: Precedence
- to: new-primary-index-Node
- query:
- - $column[Type] = '*scpb.Column'
- - $new-primary-index[Type] = '*scpb.PrimaryIndex'
- - ColumnInSourcePrimaryIndex($index-column, $new-primary-index, $table-id, $column-id, $index-id)
- - joinOnColumnID($index-column, $column, $table-id, $column-id)
- - $column-Target[TargetStatus] = ABSENT
- - $column-Node[CurrentStatus] = WRITE_ONLY
- - $new-primary-index-Target[TargetStatus] = PUBLIC
- - $new-primary-index-Node[CurrentStatus] = PUBLIC
- - joinTargetNode($column, $column-Target, $column-Node)
- - joinTargetNode($new-primary-index, $new-primary-index-Target, $new-primary-index-Node)
-- name: 'PrimaryIndex transitions to ABSENT uphold 2-version invariant: BACKFILLED->DELETE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = BACKFILLED
- - $next-Node[CurrentStatus] = DELETE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to ABSENT uphold 2-version invariant: BACKFILL_ONLY->DELETE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = BACKFILL_ONLY
- - $next-Node[CurrentStatus] = DELETE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to ABSENT uphold 2-version invariant: DELETE_ONLY->ABSENT'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = DELETE_ONLY
- - $next-Node[CurrentStatus] = ABSENT
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - nodeNotExistsWithStatusIn_TRANSIENT_DELETE_ONLY_BACKFILLED_TRANSIENT_BACKFILLED_BACKFILL_ONLY_TRANSIENT_BACKFILL_ONLY($prev-Target)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to ABSENT uphold 2-version invariant: MERGED->WRITE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = MERGED
- - $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to ABSENT uphold 2-version invariant: MERGE_ONLY->WRITE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = MERGE_ONLY
- - $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to ABSENT uphold 2-version invariant: PUBLIC->VALIDATED'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = PUBLIC
- - $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to ABSENT uphold 2-version invariant: TRANSIENT_ABSENT->ABSENT'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = TRANSIENT_ABSENT
- - $next-Node[CurrentStatus] = ABSENT
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to ABSENT uphold 2-version invariant: TRANSIENT_BACKFILLED->DELETE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = TRANSIENT_BACKFILLED
- - $next-Node[CurrentStatus] = DELETE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to ABSENT uphold 2-version invariant: TRANSIENT_BACKFILL_ONLY->DELETE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = TRANSIENT_BACKFILL_ONLY
- - $next-Node[CurrentStatus] = DELETE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to ABSENT uphold 2-version invariant: TRANSIENT_DELETE_ONLY->DELETE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = TRANSIENT_DELETE_ONLY
- - $next-Node[CurrentStatus] = DELETE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to ABSENT uphold 2-version invariant: TRANSIENT_MERGED->WRITE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = TRANSIENT_MERGED
- - $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to ABSENT uphold 2-version invariant: TRANSIENT_MERGE_ONLY->WRITE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = TRANSIENT_MERGE_ONLY
- - $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to ABSENT uphold 2-version invariant: TRANSIENT_VALIDATED->VALIDATED'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = TRANSIENT_VALIDATED
- - $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to ABSENT uphold 2-version invariant: TRANSIENT_WRITE_ONLY->WRITE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = TRANSIENT_WRITE_ONLY
- - $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to ABSENT uphold 2-version invariant: VALIDATED->WRITE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = VALIDATED
- - $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - nodeNotExistsWithStatusIn_TRANSIENT_VALIDATED($prev-Target)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to ABSENT uphold 2-version invariant: WRITE_ONLY->DELETE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = WRITE_ONLY
- - $next-Node[CurrentStatus] = DELETE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - nodeNotExistsWithStatusIn_VALIDATED_TRANSIENT_WRITE_ONLY_MERGE_ONLY_TRANSIENT_MERGE_ONLY_MERGED_TRANSIENT_MERGED($prev-Target)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to PUBLIC uphold 2-version invariant: ABSENT->BACKFILL_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = PUBLIC
- - $prev-Node[CurrentStatus] = ABSENT
- - $next-Node[CurrentStatus] = BACKFILL_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to PUBLIC uphold 2-version invariant: BACKFILLED->DELETE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = PUBLIC
- - $prev-Node[CurrentStatus] = BACKFILLED
- - $next-Node[CurrentStatus] = DELETE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to PUBLIC uphold 2-version invariant: BACKFILL_ONLY->BACKFILLED'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = PUBLIC
- - $prev-Node[CurrentStatus] = BACKFILL_ONLY
- - $next-Node[CurrentStatus] = BACKFILLED
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to PUBLIC uphold 2-version invariant: DELETE_ONLY->MERGE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = PUBLIC
- - $prev-Node[CurrentStatus] = DELETE_ONLY
- - $next-Node[CurrentStatus] = MERGE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to PUBLIC uphold 2-version invariant: MERGED->WRITE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = PUBLIC
- - $prev-Node[CurrentStatus] = MERGED
- - $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to PUBLIC uphold 2-version invariant: MERGE_ONLY->MERGED'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = PUBLIC
- - $prev-Node[CurrentStatus] = MERGE_ONLY
- - $next-Node[CurrentStatus] = MERGED
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to PUBLIC uphold 2-version invariant: VALIDATED->PUBLIC'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = PUBLIC
- - $prev-Node[CurrentStatus] = VALIDATED
- - $next-Node[CurrentStatus] = PUBLIC
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to PUBLIC uphold 2-version invariant: WRITE_ONLY->VALIDATED'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = PUBLIC
- - $prev-Node[CurrentStatus] = WRITE_ONLY
- - $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to TRANSIENT_ABSENT uphold 2-version invariant: ABSENT->BACKFILL_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- - $prev-Node[CurrentStatus] = ABSENT
- - $next-Node[CurrentStatus] = BACKFILL_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to TRANSIENT_ABSENT uphold 2-version invariant: BACKFILLED->DELETE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- - $prev-Node[CurrentStatus] = BACKFILLED
- - $next-Node[CurrentStatus] = DELETE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to TRANSIENT_ABSENT uphold 2-version invariant: BACKFILL_ONLY->BACKFILLED'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- - $prev-Node[CurrentStatus] = BACKFILL_ONLY
- - $next-Node[CurrentStatus] = BACKFILLED
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to TRANSIENT_ABSENT uphold 2-version invariant: DELETE_ONLY->MERGE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- - $prev-Node[CurrentStatus] = DELETE_ONLY
- - $next-Node[CurrentStatus] = MERGE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to TRANSIENT_ABSENT uphold 2-version invariant: MERGED->WRITE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- - $prev-Node[CurrentStatus] = MERGED
- - $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to TRANSIENT_ABSENT uphold 2-version invariant: MERGE_ONLY->MERGED'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- - $prev-Node[CurrentStatus] = MERGE_ONLY
- - $next-Node[CurrentStatus] = MERGED
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to TRANSIENT_ABSENT uphold 2-version invariant: PUBLIC->TRANSIENT_VALIDATED'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- - $prev-Node[CurrentStatus] = PUBLIC
- - $next-Node[CurrentStatus] = TRANSIENT_VALIDATED
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to TRANSIENT_ABSENT uphold 2-version invariant: TRANSIENT_BACKFILLED->TRANSIENT_DELETE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- - $prev-Node[CurrentStatus] = TRANSIENT_BACKFILLED
- - $next-Node[CurrentStatus] = TRANSIENT_DELETE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to TRANSIENT_ABSENT uphold 2-version invariant: TRANSIENT_BACKFILL_ONLY->TRANSIENT_DELETE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- - $prev-Node[CurrentStatus] = TRANSIENT_BACKFILL_ONLY
- - $next-Node[CurrentStatus] = TRANSIENT_DELETE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to TRANSIENT_ABSENT uphold 2-version invariant: TRANSIENT_DELETE_ONLY->TRANSIENT_ABSENT'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- - $prev-Node[CurrentStatus] = TRANSIENT_DELETE_ONLY
- - $next-Node[CurrentStatus] = TRANSIENT_ABSENT
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - nodeNotExistsWithStatusIn_TRANSIENT_BACKFILLED_TRANSIENT_BACKFILL_ONLY($prev-Target)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to TRANSIENT_ABSENT uphold 2-version invariant: TRANSIENT_MERGED->TRANSIENT_WRITE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- - $prev-Node[CurrentStatus] = TRANSIENT_MERGED
- - $next-Node[CurrentStatus] = TRANSIENT_WRITE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to TRANSIENT_ABSENT uphold 2-version invariant: TRANSIENT_MERGE_ONLY->TRANSIENT_WRITE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- - $prev-Node[CurrentStatus] = TRANSIENT_MERGE_ONLY
- - $next-Node[CurrentStatus] = TRANSIENT_WRITE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to TRANSIENT_ABSENT uphold 2-version invariant: TRANSIENT_VALIDATED->TRANSIENT_WRITE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- - $prev-Node[CurrentStatus] = TRANSIENT_VALIDATED
- - $next-Node[CurrentStatus] = TRANSIENT_WRITE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to TRANSIENT_ABSENT uphold 2-version invariant: TRANSIENT_WRITE_ONLY->TRANSIENT_DELETE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- - $prev-Node[CurrentStatus] = TRANSIENT_WRITE_ONLY
- - $next-Node[CurrentStatus] = TRANSIENT_DELETE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - nodeNotExistsWithStatusIn_TRANSIENT_VALIDATED_TRANSIENT_MERGE_ONLY_TRANSIENT_MERGED($prev-Target)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to TRANSIENT_ABSENT uphold 2-version invariant: VALIDATED->PUBLIC'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- - $prev-Node[CurrentStatus] = VALIDATED
- - $next-Node[CurrentStatus] = PUBLIC
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'PrimaryIndex transitions to TRANSIENT_ABSENT uphold 2-version invariant: WRITE_ONLY->VALIDATED'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.PrimaryIndex'
- - $next[Type] = '*scpb.PrimaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- - $prev-Node[CurrentStatus] = WRITE_ONLY
- - $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'SecondaryIndex transitions to ABSENT uphold 2-version invariant: BACKFILLED->DELETE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.SecondaryIndex'
- - $next[Type] = '*scpb.SecondaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = BACKFILLED
- - $next-Node[CurrentStatus] = DELETE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'SecondaryIndex transitions to ABSENT uphold 2-version invariant: BACKFILL_ONLY->DELETE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.SecondaryIndex'
- - $next[Type] = '*scpb.SecondaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = BACKFILL_ONLY
- - $next-Node[CurrentStatus] = DELETE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'SecondaryIndex transitions to ABSENT uphold 2-version invariant: DELETE_ONLY->ABSENT'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.SecondaryIndex'
- - $next[Type] = '*scpb.SecondaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = DELETE_ONLY
- - $next-Node[CurrentStatus] = ABSENT
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - nodeNotExistsWithStatusIn_BACKFILLED_BACKFILL_ONLY($prev-Target)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'SecondaryIndex transitions to ABSENT uphold 2-version invariant: MERGED->WRITE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.SecondaryIndex'
- - $next[Type] = '*scpb.SecondaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = MERGED
- - $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'SecondaryIndex transitions to ABSENT uphold 2-version invariant: MERGE_ONLY->WRITE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.SecondaryIndex'
- - $next[Type] = '*scpb.SecondaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = MERGE_ONLY
- - $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'SecondaryIndex transitions to ABSENT uphold 2-version invariant: PUBLIC->VALIDATED'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.SecondaryIndex'
- - $next[Type] = '*scpb.SecondaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = PUBLIC
- - $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'SecondaryIndex transitions to ABSENT uphold 2-version invariant: VALIDATED->WRITE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.SecondaryIndex'
- - $next[Type] = '*scpb.SecondaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = VALIDATED
- - $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'SecondaryIndex transitions to ABSENT uphold 2-version invariant: WRITE_ONLY->DELETE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.SecondaryIndex'
- - $next[Type] = '*scpb.SecondaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = WRITE_ONLY
- - $next-Node[CurrentStatus] = DELETE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - nodeNotExistsWithStatusIn_VALIDATED_MERGE_ONLY_MERGED($prev-Target)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'SecondaryIndex transitions to PUBLIC uphold 2-version invariant: ABSENT->BACKFILL_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.SecondaryIndex'
- - $next[Type] = '*scpb.SecondaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = PUBLIC
- - $prev-Node[CurrentStatus] = ABSENT
- - $next-Node[CurrentStatus] = BACKFILL_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'SecondaryIndex transitions to PUBLIC uphold 2-version invariant: BACKFILLED->DELETE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.SecondaryIndex'
- - $next[Type] = '*scpb.SecondaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = PUBLIC
- - $prev-Node[CurrentStatus] = BACKFILLED
- - $next-Node[CurrentStatus] = DELETE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'SecondaryIndex transitions to PUBLIC uphold 2-version invariant: BACKFILL_ONLY->BACKFILLED'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.SecondaryIndex'
- - $next[Type] = '*scpb.SecondaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = PUBLIC
- - $prev-Node[CurrentStatus] = BACKFILL_ONLY
- - $next-Node[CurrentStatus] = BACKFILLED
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'SecondaryIndex transitions to PUBLIC uphold 2-version invariant: DELETE_ONLY->MERGE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.SecondaryIndex'
- - $next[Type] = '*scpb.SecondaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = PUBLIC
- - $prev-Node[CurrentStatus] = DELETE_ONLY
- - $next-Node[CurrentStatus] = MERGE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'SecondaryIndex transitions to PUBLIC uphold 2-version invariant: MERGED->WRITE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.SecondaryIndex'
- - $next[Type] = '*scpb.SecondaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = PUBLIC
- - $prev-Node[CurrentStatus] = MERGED
- - $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'SecondaryIndex transitions to PUBLIC uphold 2-version invariant: MERGE_ONLY->MERGED'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.SecondaryIndex'
- - $next[Type] = '*scpb.SecondaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = PUBLIC
- - $prev-Node[CurrentStatus] = MERGE_ONLY
- - $next-Node[CurrentStatus] = MERGED
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'SecondaryIndex transitions to PUBLIC uphold 2-version invariant: VALIDATED->PUBLIC'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.SecondaryIndex'
- - $next[Type] = '*scpb.SecondaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = PUBLIC
- - $prev-Node[CurrentStatus] = VALIDATED
- - $next-Node[CurrentStatus] = PUBLIC
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'SecondaryIndex transitions to PUBLIC uphold 2-version invariant: WRITE_ONLY->VALIDATED'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.SecondaryIndex'
- - $next[Type] = '*scpb.SecondaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = PUBLIC
- - $prev-Node[CurrentStatus] = WRITE_ONLY
- - $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'TemporaryIndex transitions to ABSENT uphold 2-version invariant: DELETE_ONLY->ABSENT'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.TemporaryIndex'
- - $next[Type] = '*scpb.TemporaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = DELETE_ONLY
- - $next-Node[CurrentStatus] = ABSENT
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - nodeNotExistsWithStatusIn_TRANSIENT_DELETE_ONLY($prev-Target)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'TemporaryIndex transitions to ABSENT uphold 2-version invariant: TRANSIENT_ABSENT->ABSENT'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.TemporaryIndex'
- - $next[Type] = '*scpb.TemporaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = TRANSIENT_ABSENT
- - $next-Node[CurrentStatus] = ABSENT
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'TemporaryIndex transitions to ABSENT uphold 2-version invariant: TRANSIENT_DELETE_ONLY->DELETE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.TemporaryIndex'
- - $next[Type] = '*scpb.TemporaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = TRANSIENT_DELETE_ONLY
- - $next-Node[CurrentStatus] = DELETE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'TemporaryIndex transitions to ABSENT uphold 2-version invariant: WRITE_ONLY->DELETE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.TemporaryIndex'
- - $next[Type] = '*scpb.TemporaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = WRITE_ONLY
- - $next-Node[CurrentStatus] = DELETE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'TemporaryIndex transitions to TRANSIENT_ABSENT uphold 2-version invariant: ABSENT->DELETE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.TemporaryIndex'
- - $next[Type] = '*scpb.TemporaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- - $prev-Node[CurrentStatus] = ABSENT
- - $next-Node[CurrentStatus] = DELETE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'TemporaryIndex transitions to TRANSIENT_ABSENT uphold 2-version invariant: DELETE_ONLY->WRITE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.TemporaryIndex'
- - $next[Type] = '*scpb.TemporaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- - $prev-Node[CurrentStatus] = DELETE_ONLY
- - $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'TemporaryIndex transitions to TRANSIENT_ABSENT uphold 2-version invariant: TRANSIENT_DELETE_ONLY->TRANSIENT_ABSENT'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.TemporaryIndex'
- - $next[Type] = '*scpb.TemporaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- - $prev-Node[CurrentStatus] = TRANSIENT_DELETE_ONLY
- - $next-Node[CurrentStatus] = TRANSIENT_ABSENT
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'TemporaryIndex transitions to TRANSIENT_ABSENT uphold 2-version invariant: WRITE_ONLY->TRANSIENT_DELETE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.TemporaryIndex'
- - $next[Type] = '*scpb.TemporaryIndex'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = TRANSIENT_ABSENT
- - $prev-Node[CurrentStatus] = WRITE_ONLY
- - $next-Node[CurrentStatus] = TRANSIENT_DELETE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'UniqueWithoutIndexConstraint transitions to ABSENT uphold 2-version invariant: PUBLIC->VALIDATED'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.UniqueWithoutIndexConstraint'
- - $next[Type] = '*scpb.UniqueWithoutIndexConstraint'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = PUBLIC
- - $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'UniqueWithoutIndexConstraint transitions to ABSENT uphold 2-version invariant: VALIDATED->ABSENT'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.UniqueWithoutIndexConstraint'
- - $next[Type] = '*scpb.UniqueWithoutIndexConstraint'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = VALIDATED
- - $next-Node[CurrentStatus] = ABSENT
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - nodeNotExistsWithStatusIn_WRITE_ONLY($prev-Target)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'UniqueWithoutIndexConstraint transitions to ABSENT uphold 2-version invariant: WRITE_ONLY->VALIDATED'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.UniqueWithoutIndexConstraint'
- - $next[Type] = '*scpb.UniqueWithoutIndexConstraint'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = ABSENT
- - $prev-Node[CurrentStatus] = WRITE_ONLY
- - $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'UniqueWithoutIndexConstraint transitions to PUBLIC uphold 2-version invariant: ABSENT->WRITE_ONLY'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.UniqueWithoutIndexConstraint'
- - $next[Type] = '*scpb.UniqueWithoutIndexConstraint'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = PUBLIC
- - $prev-Node[CurrentStatus] = ABSENT
- - $next-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'UniqueWithoutIndexConstraint transitions to PUBLIC uphold 2-version invariant: VALIDATED->PUBLIC'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.UniqueWithoutIndexConstraint'
- - $next[Type] = '*scpb.UniqueWithoutIndexConstraint'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = PUBLIC
- - $prev-Node[CurrentStatus] = VALIDATED
- - $next-Node[CurrentStatus] = PUBLIC
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: 'UniqueWithoutIndexConstraint transitions to PUBLIC uphold 2-version invariant: WRITE_ONLY->VALIDATED'
- from: prev-Node
- kind: PreviousTransactionPrecedence
- to: next-Node
- query:
- - $prev[Type] = '*scpb.UniqueWithoutIndexConstraint'
- - $next[Type] = '*scpb.UniqueWithoutIndexConstraint'
- - $prev[DescID] = $descID
- - $prev[Self] = $next
- - $prev-Target[Self] = $next-Target
- - $prev-Target[TargetStatus] = PUBLIC
- - $prev-Node[CurrentStatus] = WRITE_ONLY
- - $next-Node[CurrentStatus] = VALIDATED
- - descriptorIsNotBeingDropped-24.3($prev)
- - $descriptor-data[Type] = '*scpb.TableData'
- - joinTargetNode($descriptor-data, $descriptor-data-Target, $descriptor-data-Node)
- - $descriptor-data-Node[CurrentStatus] = PUBLIC
- - $descriptor-data[DescID] = $descID
- - descriptorIsDataNotBeingAdded-24.3($descID)
- - joinTargetNode($prev, $prev-Target, $prev-Node)
- - joinTargetNode($next, $next-Target, $next-Node)
-- name: all adding indexes reached BACKFILL_ONLY before any of their columns disappear
- from: index-Node
- kind: Precedence
- to: column-Node
- query:
- - $index[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex']
- - $column[Type] = '*scpb.Column'
- - ColumnInIndex($index-column, $index, $table-id, $column-id, $index-id)
- - joinOnColumnID($index-column, $column, $table-id, $column-id)
- - $index-Target[TargetStatus] IN [PUBLIC, TRANSIENT_ABSENT]
- - $index-Node[CurrentStatus] = BACKFILL_ONLY
- - $column-Target[TargetStatus] = ABSENT
- - $column-Node[CurrentStatus] = WRITE_ONLY
- - joinTargetNode($index, $index-Target, $index-Node)
- - joinTargetNode($column, $column-Target, $column-Node)
-- name: back-reference in parent descriptor is removed before parent descriptor is dropped
- from: back-reference-in-parent-descriptor-Node
- kind: Precedence
- to: parent-descriptor-Node
- query:
- - $back-reference-in-parent-descriptor[Type] IN ['*scpb.SchemaChild', '*scpb.SchemaParent']
- - $parent-descriptor[Type] IN ['*scpb.AliasType', '*scpb.CompositeType', '*scpb.Database', '*scpb.EnumType', '*scpb.Function', '*scpb.Schema', '*scpb.Sequence', '*scpb.Table', '*scpb.View']
- - joinReferencedDescID($back-reference-in-parent-descriptor, $parent-descriptor, $desc-id)
- - toAbsent($back-reference-in-parent-descriptor-Target, $parent-descriptor-Target)
- - $back-reference-in-parent-descriptor-Node[CurrentStatus] = ABSENT
- - $parent-descriptor-Node[CurrentStatus] = DROPPED
- - joinTargetNode($back-reference-in-parent-descriptor, $back-reference-in-parent-descriptor-Target, $back-reference-in-parent-descriptor-Node)
- - joinTargetNode($parent-descriptor, $parent-descriptor-Target, $parent-descriptor-Node)
-- name: column constraint removed right before column reaches write only
- from: column-constraint-Node
- kind: Precedence
- to: column-Node
- query:
- - $column-constraint[Type] IN ['*scpb.CheckConstraint', '*scpb.ColumnNotNull', '*scpb.ForeignKeyConstraint', '*scpb.UniqueWithoutIndexConstraint']
- - $column[Type] = '*scpb.Column'
- - joinOnColumnID($column-constraint, $column, $table-id, $col-id)
- - toAbsent($column-constraint-Target, $column-Target)
- - $column-constraint-Node[CurrentStatus] = ABSENT
- - $column-Node[CurrentStatus] = WRITE_ONLY
- - joinTargetNode($column-constraint, $column-constraint-Target, $column-constraint-Node)
- - joinTargetNode($column, $column-Target, $column-Node)
-- name: column constraint removed right before column reaches write only
- from: column-constraint-Node
- kind: Precedence
- to: column-Node
- query:
- - $column-constraint[Type] IN ['*scpb.CheckConstraint', '*scpb.ColumnNotNull', '*scpb.ForeignKeyConstraint', '*scpb.UniqueWithoutIndexConstraint']
- - $column[Type] = '*scpb.Column'
- - joinOnColumnID($column-constraint, $column, $table-id, $col-id)
- - transient($column-constraint-Target, $column-Target)
- - $column-constraint-Node[CurrentStatus] = TRANSIENT_ABSENT
- - $column-Node[CurrentStatus] = TRANSIENT_WRITE_ONLY
- - joinTargetNode($column-constraint, $column-constraint-Target, $column-constraint-Node)
- - joinTargetNode($column, $column-Target, $column-Node)
-- name: column constraint removed right before column reaches write only
- from: column-constraint-Node
- kind: Precedence
- to: column-Node
- query:
- - $column-constraint[Type] IN ['*scpb.CheckConstraint', '*scpb.ColumnNotNull', '*scpb.ForeignKeyConstraint', '*scpb.UniqueWithoutIndexConstraint']
- - $column[Type] = '*scpb.Column'
- - joinOnColumnID($column-constraint, $column, $table-id, $col-id)
- - $column-constraint-Target[TargetStatus] = TRANSIENT_ABSENT
- - $column-constraint-Node[CurrentStatus] = TRANSIENT_ABSENT
- - $column-Target[TargetStatus] = ABSENT
- - $column-Node[CurrentStatus] = WRITE_ONLY
- - joinTargetNode($column-constraint, $column-constraint-Target, $column-constraint-Node)
- - joinTargetNode($column, $column-Target, $column-Node)
-- name: column constraint removed right before column reaches write only
- from: column-constraint-Node
- kind: Precedence
- to: column-Node
- query:
- - $column-constraint[Type] IN ['*scpb.CheckConstraint', '*scpb.ColumnNotNull', '*scpb.ForeignKeyConstraint', '*scpb.UniqueWithoutIndexConstraint']
- - $column[Type] = '*scpb.Column'
- - joinOnColumnID($column-constraint, $column, $table-id, $col-id)
- - $column-constraint-Target[TargetStatus] = ABSENT
- - $column-constraint-Node[CurrentStatus] = ABSENT
- - $column-Target[TargetStatus] = TRANSIENT_ABSENT
- - $column-Node[CurrentStatus] = TRANSIENT_WRITE_ONLY
- - joinTargetNode($column-constraint, $column-constraint-Target, $column-constraint-Node)
- - joinTargetNode($column, $column-Target, $column-Node)
-- name: column dependents exist before column becomes public
- from: dependent-Node
- kind: Precedence
- to: column-Node
- query:
- - $dependent[Type] IN ['*scpb.ColumnComment', '*scpb.ColumnComputeExpression', '*scpb.ColumnDefaultExpression', '*scpb.ColumnName', '*scpb.ColumnNotNull', '*scpb.ColumnOnUpdateExpression', '*scpb.ColumnType', '*scpb.IndexColumn', '*scpb.SequenceOwner']
- - $column[Type] = '*scpb.Column'
- - joinOnColumnID($dependent, $column, $table-id, $col-id)
- - ToPublicOrTransient($dependent-Target, $column-Target)
- - $dependent-Node[CurrentStatus] = PUBLIC
- - $column-Node[CurrentStatus] = PUBLIC
- - joinTargetNode($dependent, $dependent-Target, $dependent-Node)
- - joinTargetNode($column, $column-Target, $column-Node)
-- name: column existence precedes column dependents
- from: column-Node
- kind: Precedence
- to: dependent-Node
- query:
- - $column[Type] = '*scpb.Column'
- - $dependent[Type] IN ['*scpb.ColumnComment', '*scpb.ColumnComputeExpression', '*scpb.ColumnDefaultExpression', '*scpb.ColumnName', '*scpb.ColumnNotNull', '*scpb.ColumnOnUpdateExpression', '*scpb.ColumnType', '*scpb.IndexColumn', '*scpb.SequenceOwner']
- - joinOnColumnID($column, $dependent, $table-id, $col-id)
- - ToPublicOrTransient($column-Target, $dependent-Target)
- - $column-Node[CurrentStatus] = DELETE_ONLY
- - $dependent-Node[CurrentStatus] = PUBLIC
- - joinTargetNode($column, $column-Target, $column-Node)
- - joinTargetNode($dependent, $dependent-Target, $dependent-Node)
-- name: column existence precedes index existence
- from: column-Node
- kind: Precedence
- to: index-Node
- query:
- - $column[Type] = '*scpb.Column'
- - $index[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex']
- - joinOnColumnID($column, $index-column, $table-id, $column-id)
- - ColumnInIndex($index-column, $index, $table-id, $column-id, $index-id)
- - ToPublicOrTransient($column-Target, $index-Target)
- - $column-Node[CurrentStatus] = DELETE_ONLY
- - $index-Node[CurrentStatus] = BACKFILL_ONLY
- - joinTargetNode($column, $column-Target, $column-Node)
- - joinTargetNode($index, $index-Target, $index-Node)
-- name: column existence precedes temp index existence
- from: column-Node
- kind: Precedence
- to: index-Node
- query:
- - $column[Type] = '*scpb.Column'
- - $index[Type] = '*scpb.TemporaryIndex'
- - joinOnColumnID($index-column, $column, $table-id, $column-id)
- - ColumnInIndex($index-column, $index, $table-id, $column-id, $index-id)
- - ToPublicOrTransient($column-Target, $index-Target)
- - $column-Node[CurrentStatus] = DELETE_ONLY
- - $index-Node[CurrentStatus] = DELETE_ONLY
- - joinTargetNode($column, $column-Target, $column-Node)
- - joinTargetNode($index, $index-Target, $index-Node)
-- name: column is WRITE_ONLY before temporary index is WRITE_ONLY
- from: column-Node
- kind: Precedence
- to: index-Node
- query:
- - $column[Type] = '*scpb.Column'
- - $index[Type] = '*scpb.TemporaryIndex'
- - joinOnColumnID($index-column, $column, $table-id, $column-id)
- - ColumnInIndex($index-column, $index, $table-id, $column-id, $index-id)
- - ToPublicOrTransient($column-Target, $index-Target)
- - $column-Node[CurrentStatus] = WRITE_ONLY
- - $index-Node[CurrentStatus] = WRITE_ONLY
- - joinTargetNode($column, $column-Target, $column-Node)
- - joinTargetNode($index, $index-Target, $index-Node)
-- name: column name and type set right after column existence
- from: column-Node
- kind: SameStagePrecedence
- to: column-name-or-type-Node
- query:
- - $column[Type] = '*scpb.Column'
- - $column-name-or-type[Type] IN ['*scpb.ColumnName', '*scpb.ColumnType']
- - ToPublicOrTransient($column-Target, $column-name-or-type-Target)
- - $column-Node[CurrentStatus] = DELETE_ONLY
- - $column-name-or-type-Node[CurrentStatus] = PUBLIC
- - joinOnColumnID($column, $column-name-or-type, $table-id, $col-id)
- - joinTargetNode($column, $column-Target, $column-Node)
- - joinTargetNode($column-name-or-type, $column-name-or-type-Target, $column-name-or-type-Node)
-- name: column no longer public before dependents
- from: column-Node
- kind: Precedence
- to: dependent-Node
- query:
- - $column[Type] = '*scpb.Column'
- - $dependent[Type] IN ['*scpb.ColumnComment', '*scpb.ColumnComputeExpression', '*scpb.ColumnDefaultExpression', '*scpb.ColumnName', '*scpb.ColumnOnUpdateExpression', '*scpb.ColumnType', '*scpb.IndexColumn', '*scpb.SequenceOwner']
- - joinOnColumnID($column, $dependent, $table-id, $col-id)
- - toAbsent($column-Target, $dependent-Target)
- - $column-Node[CurrentStatus] = WRITE_ONLY
- - $dependent-Node[CurrentStatus] = ABSENT
- - joinTargetNode($column, $column-Target, $column-Node)
- - joinTargetNode($dependent, $dependent-Target, $dependent-Node)
-- name: column no longer public before dependents
- from: column-Node
- kind: Precedence
- to: dependent-Node
- query:
- - $column[Type] = '*scpb.Column'
- - $dependent[Type] IN ['*scpb.ColumnComment', '*scpb.ColumnComputeExpression', '*scpb.ColumnDefaultExpression', '*scpb.ColumnName', '*scpb.ColumnOnUpdateExpression', '*scpb.ColumnType', '*scpb.IndexColumn', '*scpb.SequenceOwner']
- - joinOnColumnID($column, $dependent, $table-id, $col-id)
- - transient($column-Target, $dependent-Target)
- - $column-Node[CurrentStatus] = TRANSIENT_WRITE_ONLY
- - $dependent-Node[CurrentStatus] = TRANSIENT_ABSENT
- - joinTargetNode($column, $column-Target, $column-Node)
- - joinTargetNode($dependent, $dependent-Target, $dependent-Node)
-- name: column no longer public before dependents
- from: column-Node
- kind: Precedence
- to: dependent-Node
- query:
- - $column[Type] = '*scpb.Column'
- - $dependent[Type] IN ['*scpb.ColumnComment', '*scpb.ColumnComputeExpression', '*scpb.ColumnDefaultExpression', '*scpb.ColumnName', '*scpb.ColumnOnUpdateExpression', '*scpb.ColumnType', '*scpb.IndexColumn', '*scpb.SequenceOwner']
- - joinOnColumnID($column, $dependent, $table-id, $col-id)
- - $column-Target[TargetStatus] = TRANSIENT_ABSENT
- - $column-Node[CurrentStatus] = TRANSIENT_WRITE_ONLY
- - $dependent-Target[TargetStatus] = ABSENT
- - $dependent-Node[CurrentStatus] = ABSENT
- - joinTargetNode($column, $column-Target, $column-Node)
- - joinTargetNode($dependent, $dependent-Target, $dependent-Node)
-- name: column no longer public before dependents
- from: column-Node
- kind: Precedence
- to: dependent-Node
- query:
- - $column[Type] = '*scpb.Column'
- - $dependent[Type] IN ['*scpb.ColumnComment', '*scpb.ColumnComputeExpression', '*scpb.ColumnDefaultExpression', '*scpb.ColumnName', '*scpb.ColumnOnUpdateExpression', '*scpb.ColumnType', '*scpb.IndexColumn', '*scpb.SequenceOwner']
- - joinOnColumnID($column, $dependent, $table-id, $col-id)
- - $column-Target[TargetStatus] = ABSENT
- - $column-Node[CurrentStatus] = WRITE_ONLY
- - $dependent-Target[TargetStatus] = TRANSIENT_ABSENT
- - $dependent-Node[CurrentStatus] = TRANSIENT_ABSENT
- - joinTargetNode($column, $column-Target, $column-Node)
- - joinTargetNode($dependent, $dependent-Target, $dependent-Node)
-- name: column public before non-index-backed constraint (including hash-sharded) is created
- from: column-Node
- kind: Precedence
- to: constraint-Node
- query:
- - $column[Type] = '*scpb.Column'
- - $constraint[Type] IN ['*scpb.CheckConstraint', '*scpb.CheckConstraintUnvalidated', '*scpb.ColumnNotNull', '*scpb.ForeignKeyConstraint', '*scpb.ForeignKeyConstraintUnvalidated', '*scpb.UniqueWithoutIndexConstraint', '*scpb.UniqueWithoutIndexConstraintUnvalidated']
- - $column[ColumnID] = $columnID
- - $constraint[ReferencedColumnIDs] CONTAINS $columnID
- - joinOnDescID($column, $constraint, $table-id)
- - ToPublicOrTransient($column-Target, $constraint-Target)
- - $column-Node[CurrentStatus] = PUBLIC
- - $constraint-Node[CurrentStatus] = WRITE_ONLY
- - joinTargetNode($column, $column-Target, $column-Node)
- - joinTargetNode($constraint, $constraint-Target, $constraint-Node)
-- name: column type dependents removed right before column type
- from: dependent-Node
- kind: SameStagePrecedence
- to: column-type-Node
- query:
- - $dependent[Type] IN ['*scpb.ColumnComputeExpression', '*scpb.ColumnDefaultExpression', '*scpb.ColumnOnUpdateExpression', '*scpb.SequenceOwner']
- - $column-type[Type] = '*scpb.ColumnType'
- - joinOnColumnID($dependent, $column-type, $table-id, $col-id)
- - toAbsent($dependent-Target, $column-type-Target)
- - $dependent-Node[CurrentStatus] = ABSENT
- - $column-type-Node[CurrentStatus] = ABSENT
- - joinTargetNode($dependent, $dependent-Target, $dependent-Node)
- - joinTargetNode($column-type, $column-type-Target, $column-type-Node)
-- name: column type is changed to public after doing validation of a transient check constraint
- from: transient-check-constraint-Node
- kind: SameStagePrecedence
- to: column-type-Node
- query:
- - $transient-check-constraint[Type] = '*scpb.CheckConstraint'
- - $column-type[Type] = '*scpb.ColumnType'
- - joinOnDescID($transient-check-constraint, $column-type, $table-id)
- - $column-type[ColumnID] = $columnID
- - $transient-check-constraint[ReferencedColumnIDs] CONTAINS $columnID
- - $transient-check-constraint-Target[TargetStatus] = TRANSIENT_ABSENT
- - $column-type-Target[TargetStatus] = PUBLIC
- - $transient-check-constraint-Node[CurrentStatus] = TRANSIENT_VALIDATED
- - $column-type-Node[CurrentStatus] = PUBLIC
- - joinTargetNode($transient-check-constraint, $transient-check-constraint-Target, $transient-check-constraint-Node)
- - joinTargetNode($column-type, $column-type-Target, $column-type-Node)
-- name: column type removed before column family
- from: column-type-Node
- kind: Precedence
- to: column-family-Node
- query:
- - $column-type[Type] = '*scpb.ColumnType'
- - $column-family[Type] = '*scpb.ColumnFamily'
- - joinOnColumnFamilyID($column-type, $column-family, $table-id, $family-id)
- - toAbsent($column-type-Target, $column-family-Target)
- - $column-type-Node[CurrentStatus] = ABSENT
- - $column-family-Node[CurrentStatus] = ABSENT
- - joinTargetNode($column-type, $column-type-Target, $column-type-Node)
- - joinTargetNode($column-family, $column-family-Target, $column-family-Node)
-- name: column type removed right before column when not dropping relation
- from: column-type-Node
- kind: SameStagePrecedence
- to: column-Node
- query:
- - $column-type[Type] = '*scpb.ColumnType'
- - descriptorIsNotBeingDropped-24.3($column-type)
- - $column[Type] = '*scpb.Column'
- - joinOnColumnID($column-type, $column, $table-id, $col-id)
- - toAbsent($column-type-Target, $column-Target)
- - $column-type-Node[CurrentStatus] = ABSENT
- - $column-Node[CurrentStatus] = ABSENT
- - joinTargetNode($column-type, $column-type-Target, $column-type-Node)
- - joinTargetNode($column, $column-Target, $column-Node)
-- name: column type update is decomposed as a drop then add
- from: old-column-type-Node
- kind: Precedence
- to: new-column-type-Node
- query:
- - $old-column-type[Type] = '*scpb.ColumnType'
- - $new-column-type[Type] = '*scpb.ColumnType'
- - joinOnColumnID($old-column-type, $new-column-type, $table-id, $col-id)
- - $old-column-type-Target[TargetStatus] = ABSENT
- - $old-column-type-Node[CurrentStatus] = PUBLIC
- - $new-column-type-Target[TargetStatus] = PUBLIC
- - $new-column-type-Node[CurrentStatus] = ABSENT
- - joinTargetNode($old-column-type, $old-column-type-Target, $old-column-type-Node)
- - joinTargetNode($new-column-type, $new-column-type-Target, $new-column-type-Node)
-- name: column writable right before column constraint is enforced.
- from: column-Node
- kind: SameStagePrecedence
- to: column-constraint-Node
- query:
- - $column[Type] = '*scpb.Column'
- - $column-constraint[Type] = '*scpb.ColumnNotNull'
- - joinOnColumnID($column, $column-constraint, $table-id, $col-id)
- - ToPublicOrTransient($column-Target, $column-constraint-Target)
- - $column-Node[CurrentStatus] = WRITE_ONLY
- - $column-constraint-Node[CurrentStatus] = WRITE_ONLY
- - joinTargetNode($column, $column-Target, $column-Node)
- - joinTargetNode($column-constraint, $column-constraint-Target, $column-constraint-Node)
-- name: constraint dependent public right before complex constraint
- from: dependent-Node
- kind: SameStagePrecedence
- to: complex-constraint-Node
- query:
- - $dependent[Type] = '*scpb.ConstraintComment'
- - $complex-constraint[Type] IN ['*scpb.CheckConstraint', '*scpb.ColumnNotNull', '*scpb.ForeignKeyConstraint', '*scpb.UniqueWithoutIndexConstraint']
- - joinOnConstraintID($dependent, $complex-constraint, $table-id, $constraint-id)
- - ToPublicOrTransient($dependent-Target, $complex-constraint-Target)
- - $dependent-Node[CurrentStatus] = PUBLIC
- - $complex-constraint-Node[CurrentStatus] = PUBLIC
- - joinTargetNode($dependent, $dependent-Target, $dependent-Node)
- - joinTargetNode($complex-constraint, $complex-constraint-Target, $complex-constraint-Node)
-- name: constraint no longer public before dependents
- from: constraint-Node
- kind: Precedence
- to: dependent-Node
- query:
- - $constraint[Type] IN ['*scpb.CheckConstraint', '*scpb.ColumnNotNull', '*scpb.ForeignKeyConstraint', '*scpb.UniqueWithoutIndexConstraint']
- - $dependent[Type] = '*scpb.ConstraintComment'
- - joinOnConstraintID($constraint, $dependent, $table-id, $constraint-id)
- - toAbsent($constraint-Target, $dependent-Target)
- - $constraint-Node[CurrentStatus] = VALIDATED
- - $dependent-Node[CurrentStatus] = ABSENT
- - joinTargetNode($constraint, $constraint-Target, $constraint-Node)
- - joinTargetNode($dependent, $dependent-Target, $dependent-Node)
-- name: constraint no longer public before dependents
- from: constraint-Node
- kind: Precedence
- to: dependent-Node
- query:
- - $constraint[Type] IN ['*scpb.CheckConstraint', '*scpb.ColumnNotNull', '*scpb.ForeignKeyConstraint', '*scpb.UniqueWithoutIndexConstraint']
- - $dependent[Type] = '*scpb.ConstraintComment'
- - joinOnConstraintID($constraint, $dependent, $table-id, $constraint-id)
- - transient($constraint-Target, $dependent-Target)
- - $constraint-Node[CurrentStatus] = TRANSIENT_VALIDATED
- - $dependent-Node[CurrentStatus] = TRANSIENT_ABSENT
- - joinTargetNode($constraint, $constraint-Target, $constraint-Node)
- - joinTargetNode($dependent, $dependent-Target, $dependent-Node)
-- name: constraint no longer public before dependents
- from: constraint-Node
- kind: Precedence
- to: dependent-Node
- query:
- - $constraint[Type] IN ['*scpb.CheckConstraint', '*scpb.ColumnNotNull', '*scpb.ForeignKeyConstraint', '*scpb.UniqueWithoutIndexConstraint']
- - $dependent[Type] = '*scpb.ConstraintComment'
- - joinOnConstraintID($constraint, $dependent, $table-id, $constraint-id)
- - $constraint-Target[TargetStatus] = TRANSIENT_ABSENT
- - $constraint-Node[CurrentStatus] = TRANSIENT_VALIDATED
- - $dependent-Target[TargetStatus] = ABSENT
- - $dependent-Node[CurrentStatus] = ABSENT
- - joinTargetNode($constraint, $constraint-Target, $constraint-Node)
- - joinTargetNode($dependent, $dependent-Target, $dependent-Node)
-- name: constraint no longer public before dependents
- from: constraint-Node
- kind: Precedence
- to: dependent-Node
- query:
- - $constraint[Type] IN ['*scpb.CheckConstraint', '*scpb.ColumnNotNull', '*scpb.ForeignKeyConstraint', '*scpb.UniqueWithoutIndexConstraint']
- - $dependent[Type] = '*scpb.ConstraintComment'
- - joinOnConstraintID($constraint, $dependent, $table-id, $constraint-id)
- - $constraint-Target[TargetStatus] = ABSENT
- - $constraint-Node[CurrentStatus] = VALIDATED
- - $dependent-Target[TargetStatus] = TRANSIENT_ABSENT
- - $dependent-Node[CurrentStatus] = TRANSIENT_ABSENT
- - joinTargetNode($constraint, $constraint-Target, $constraint-Node)
- - joinTargetNode($dependent, $dependent-Target, $dependent-Node)
-- name: cross-descriptor constraint is absent before referenced descriptor is dropped
- from: cross-desc-constraint-Node
- kind: Precedence
- to: referenced-descriptor-Node
- query:
- - $cross-desc-constraint[Type] IN ['*scpb.CheckConstraint', '*scpb.ForeignKeyConstraint', '*scpb.UniqueWithoutIndexConstraint']
- - $referenced-descriptor[Type] IN ['*scpb.AliasType', '*scpb.CompositeType', '*scpb.Database', '*scpb.EnumType', '*scpb.Function', '*scpb.Schema', '*scpb.Sequence', '*scpb.Table', '*scpb.View']
- - joinReferencedDescID($cross-desc-constraint, $referenced-descriptor, $desc-id)
- - toAbsent($cross-desc-constraint-Target, $referenced-descriptor-Target)
- - $cross-desc-constraint-Node[CurrentStatus] = ABSENT
- - $referenced-descriptor-Node[CurrentStatus] = DROPPED
- - joinTargetNode($cross-desc-constraint, $cross-desc-constraint-Target, $cross-desc-constraint-Node)
- - joinTargetNode($referenced-descriptor, $referenced-descriptor-Target, $referenced-descriptor-Node)
-- name: cross-descriptor constraint is absent before referencing descriptor is dropped
- from: cross-desc-constraint-Node
- kind: Precedence
- to: referencing-descriptor-Node
- query:
- - $cross-desc-constraint[Type] IN ['*scpb.CheckConstraint', '*scpb.ForeignKeyConstraint', '*scpb.UniqueWithoutIndexConstraint']
- - $referencing-descriptor[Type] IN ['*scpb.AliasType', '*scpb.CompositeType', '*scpb.Database', '*scpb.EnumType', '*scpb.Function', '*scpb.Schema', '*scpb.Sequence', '*scpb.Table', '*scpb.View']
- - joinOnDescID($cross-desc-constraint, $referencing-descriptor, $desc-id)
- - toAbsent($cross-desc-constraint-Target, $referencing-descriptor-Target)
- - $cross-desc-constraint-Node[CurrentStatus] = ABSENT
- - $referencing-descriptor-Node[CurrentStatus] = DROPPED
- - joinTargetNode($cross-desc-constraint, $cross-desc-constraint-Target, $cross-desc-constraint-Node)
- - joinTargetNode($referencing-descriptor, $referencing-descriptor-Target, $referencing-descriptor-Node)
-- name: dependent view absent before secondary index
- from: view-Node
- kind: Precedence
- to: index-Node
- query:
- - $view[Type] = '*scpb.View'
- - $index[Type] = '*scpb.SecondaryIndex'
- - viewReferencesIndex(*scpb.View, *scpb.SecondaryIndex)($view, $index)
- - toAbsent($view-Target, $index-Target)
- - $view-Node[CurrentStatus] = ABSENT
- - $index-Node[CurrentStatus] = ABSENT
- - joinTargetNode($view, $view-Target, $view-Node)
- - joinTargetNode($index, $index-Target, $index-Node)
-- name: dependent view absent before secondary index
- from: view-Node
- kind: Precedence
- to: index-Node
- query:
- - $view[Type] = '*scpb.View'
- - $index[Type] = '*scpb.SecondaryIndex'
- - viewReferencesIndex(*scpb.View, *scpb.SecondaryIndex)($view, $index)
- - transient($view-Target, $index-Target)
- - $view-Node[CurrentStatus] = TRANSIENT_ABSENT
- - $index-Node[CurrentStatus] = TRANSIENT_ABSENT
- - joinTargetNode($view, $view-Target, $view-Node)
- - joinTargetNode($index, $index-Target, $index-Node)
-- name: dependent view absent before secondary index
- from: view-Node
- kind: Precedence
- to: index-Node
- query:
- - $view[Type] = '*scpb.View'
- - $index[Type] = '*scpb.SecondaryIndex'
- - viewReferencesIndex(*scpb.View, *scpb.SecondaryIndex)($view, $index)
- - $view-Target[TargetStatus] = TRANSIENT_ABSENT
- - $view-Node[CurrentStatus] = TRANSIENT_ABSENT
- - $index-Target[TargetStatus] = ABSENT
- - $index-Node[CurrentStatus] = ABSENT
- - joinTargetNode($view, $view-Target, $view-Node)
- - joinTargetNode($index, $index-Target, $index-Node)
-- name: dependent view absent before secondary index
- from: view-Node
- kind: Precedence
- to: index-Node
- query:
- - $view[Type] = '*scpb.View'
- - $index[Type] = '*scpb.SecondaryIndex'
- - viewReferencesIndex(*scpb.View, *scpb.SecondaryIndex)($view, $index)
- - $view-Target[TargetStatus] = ABSENT
- - $view-Node[CurrentStatus] = ABSENT
- - $index-Target[TargetStatus] = TRANSIENT_ABSENT
- - $index-Node[CurrentStatus] = TRANSIENT_ABSENT
- - joinTargetNode($view, $view-Target, $view-Node)
- - joinTargetNode($index, $index-Target, $index-Node)
-- name: dependent view no longer public before secondary index
- from: view-Node
- kind: Precedence
- to: index-Node
- query:
- - $view[Type] = '*scpb.View'
- - $index[Type] = '*scpb.SecondaryIndex'
- - viewReferencesIndex(*scpb.View, *scpb.SecondaryIndex)($view, $index)
- - toAbsent($view-Target, $index-Target)
- - $view-Node[CurrentStatus] = DROPPED
- - $index-Node[CurrentStatus] = VALIDATED
- - joinTargetNode($view, $view-Target, $view-Node)
- - joinTargetNode($index, $index-Target, $index-Node)
-- name: dependent view no longer public before secondary index
- from: view-Node
- kind: Precedence
- to: index-Node
- query:
- - $view[Type] = '*scpb.View'
- - $index[Type] = '*scpb.SecondaryIndex'
- - viewReferencesIndex(*scpb.View, *scpb.SecondaryIndex)($view, $index)
- - transient($view-Target, $index-Target)
- - $view-Node[CurrentStatus] = TRANSIENT_DROPPED
- - $index-Node[CurrentStatus] = TRANSIENT_VALIDATED
- - joinTargetNode($view, $view-Target, $view-Node)
- - joinTargetNode($index, $index-Target, $index-Node)
-- name: dependent view no longer public before secondary index
- from: view-Node
- kind: Precedence
- to: index-Node
- query:
- - $view[Type] = '*scpb.View'
- - $index[Type] = '*scpb.SecondaryIndex'
- - viewReferencesIndex(*scpb.View, *scpb.SecondaryIndex)($view, $index)
- - $view-Target[TargetStatus] = TRANSIENT_ABSENT
- - $view-Node[CurrentStatus] = TRANSIENT_DROPPED
- - $index-Target[TargetStatus] = ABSENT
- - $index-Node[CurrentStatus] = VALIDATED
- - joinTargetNode($view, $view-Target, $view-Node)
- - joinTargetNode($index, $index-Target, $index-Node)
-- name: dependent view no longer public before secondary index
- from: view-Node
- kind: Precedence
- to: index-Node
- query:
- - $view[Type] = '*scpb.View'
- - $index[Type] = '*scpb.SecondaryIndex'
- - viewReferencesIndex(*scpb.View, *scpb.SecondaryIndex)($view, $index)
- - $view-Target[TargetStatus] = ABSENT
- - $view-Node[CurrentStatus] = DROPPED
- - $index-Target[TargetStatus] = TRANSIENT_ABSENT
- - $index-Node[CurrentStatus] = TRANSIENT_VALIDATED
- - joinTargetNode($view, $view-Target, $view-Node)
- - joinTargetNode($index, $index-Target, $index-Node)
-- name: dependents exist before descriptor becomes public
- from: dependent-Node
- kind: Precedence
- to: relation-Node
- query:
- - $dependent[Type] IN ['*scpb.CheckConstraint', '*scpb.CheckConstraintUnvalidated', '*scpb.Column', '*scpb.ColumnComment', '*scpb.ColumnComputeExpression', '*scpb.ColumnDefaultExpression', '*scpb.ColumnFamily', '*scpb.ColumnName', '*scpb.ColumnNotNull', '*scpb.ColumnOnUpdateExpression', '*scpb.ColumnType', '*scpb.CompositeTypeAttrName', '*scpb.CompositeTypeAttrType', '*scpb.ConstraintComment', '*scpb.ConstraintWithoutIndexName', '*scpb.DatabaseComment', '*scpb.DatabaseRegionConfig', '*scpb.DatabaseRoleSetting', '*scpb.DatabaseZoneConfig', '*scpb.EnumTypeValue', '*scpb.ForeignKeyConstraint', '*scpb.ForeignKeyConstraintUnvalidated', '*scpb.FunctionBody', '*scpb.FunctionLeakProof', '*scpb.FunctionName', '*scpb.FunctionNullInputBehavior', '*scpb.FunctionSecurity', '*scpb.FunctionVolatility', '*scpb.IndexColumn', '*scpb.IndexComment', '*scpb.IndexName', '*scpb.IndexPartitioning', '*scpb.IndexZoneConfig', '*scpb.LDRJobIDs', '*scpb.Namespace', '*scpb.Owner', '*scpb.PartitionZoneConfig', '*scpb.PrimaryIndex', '*scpb.RowLevelTTL', '*scpb.SchemaChild', '*scpb.SchemaComment', '*scpb.SchemaParent', '*scpb.SecondaryIndex', '*scpb.SecondaryIndexPartial', '*scpb.SequenceOption', '*scpb.SequenceOwner', '*scpb.TableComment', '*scpb.TableLocalityGlobal', '*scpb.TableLocalityPrimaryRegion', '*scpb.TableLocalityRegionalByRow', '*scpb.TableLocalitySecondaryRegion', '*scpb.TablePartitioning', '*scpb.TableSchemaLocked', '*scpb.TableZoneConfig', '*scpb.TemporaryIndex', '*scpb.Trigger', '*scpb.TriggerDeps', '*scpb.TriggerEnabled', '*scpb.TriggerEvents', '*scpb.TriggerFunctionCall', '*scpb.TriggerName', '*scpb.TriggerTiming', '*scpb.TriggerTransition', '*scpb.TriggerWhen', '*scpb.TypeComment', '*scpb.UniqueWithoutIndexConstraint', '*scpb.UniqueWithoutIndexConstraintUnvalidated', '*scpb.UserPrivileges']
- - $relation[Type] IN ['*scpb.AliasType', '*scpb.CompositeType', '*scpb.Database', '*scpb.EnumType', '*scpb.Function', '*scpb.Schema', '*scpb.Sequence', '*scpb.Table', '*scpb.View']
- - joinOnDescID($dependent, $relation, $relation-id)
- - ToPublicOrTransient($dependent-Target, $relation-Target)
- - $dependent-Node[CurrentStatus] = PUBLIC
- - $relation-Node[CurrentStatus] = PUBLIC
- - joinTargetNode($dependent, $dependent-Target, $dependent-Node)
- - joinTargetNode($relation, $relation-Target, $relation-Node)
-- name: dependents removed before column
- from: dependent-Node
- kind: Precedence
- to: column-Node
- query:
- - $dependent[Type] IN ['*scpb.ColumnComment', '*scpb.ColumnComputeExpression', '*scpb.ColumnDefaultExpression', '*scpb.ColumnName', '*scpb.ColumnNotNull', '*scpb.ColumnOnUpdateExpression', '*scpb.ColumnType', '*scpb.IndexColumn', '*scpb.SequenceOwner']
- - $column[Type] = '*scpb.Column'
- - joinOnColumnID($dependent, $column, $table-id, $col-id)
- - toAbsent($dependent-Target, $column-Target)
- - $dependent-Node[CurrentStatus] = ABSENT
- - $column-Node[CurrentStatus] = ABSENT
- - joinTargetNode($dependent, $dependent-Target, $dependent-Node)
- - joinTargetNode($column, $column-Target, $column-Node)
-- name: dependents removed before column
- from: dependent-Node
- kind: Precedence
- to: column-Node
- query:
- - $dependent[Type] IN ['*scpb.ColumnComment', '*scpb.ColumnComputeExpression', '*scpb.ColumnDefaultExpression', '*scpb.ColumnName', '*scpb.ColumnNotNull', '*scpb.ColumnOnUpdateExpression', '*scpb.ColumnType', '*scpb.IndexColumn', '*scpb.SequenceOwner']
- - $column[Type] = '*scpb.Column'
- - joinOnColumnID($dependent, $column, $table-id, $col-id)
- - transient($dependent-Target, $column-Target)
- - $dependent-Node[CurrentStatus] = TRANSIENT_ABSENT
- - $column-Node[CurrentStatus] = TRANSIENT_ABSENT
- - joinTargetNode($dependent, $dependent-Target, $dependent-Node)
- - joinTargetNode($column, $column-Target, $column-Node)
-- name: dependents removed before column
- from: dependent-Node
- kind: Precedence
- to: column-Node
- query:
- - $dependent[Type] IN ['*scpb.ColumnComment', '*scpb.ColumnComputeExpression', '*scpb.ColumnDefaultExpression', '*scpb.ColumnName', '*scpb.ColumnNotNull', '*scpb.ColumnOnUpdateExpression', '*scpb.ColumnType', '*scpb.IndexColumn', '*scpb.SequenceOwner']
- - $column[Type] = '*scpb.Column'
- - joinOnColumnID($dependent, $column, $table-id, $col-id)
- - $dependent-Target[TargetStatus] = TRANSIENT_ABSENT
- - $dependent-Node[CurrentStatus] = TRANSIENT_ABSENT
- - $column-Target[TargetStatus] = ABSENT
- - $column-Node[CurrentStatus] = ABSENT
- - joinTargetNode($dependent, $dependent-Target, $dependent-Node)
- - joinTargetNode($column, $column-Target, $column-Node)
-- name: dependents removed before column
- from: dependent-Node
- kind: Precedence
- to: column-Node
- query:
- - $dependent[Type] IN ['*scpb.ColumnComment', '*scpb.ColumnComputeExpression', '*scpb.ColumnDefaultExpression', '*scpb.ColumnName', '*scpb.ColumnNotNull', '*scpb.ColumnOnUpdateExpression', '*scpb.ColumnType', '*scpb.IndexColumn', '*scpb.SequenceOwner']
- - $column[Type] = '*scpb.Column'
- - joinOnColumnID($dependent, $column, $table-id, $col-id)
- - $dependent-Target[TargetStatus] = ABSENT
- - $dependent-Node[CurrentStatus] = ABSENT
- - $column-Target[TargetStatus] = TRANSIENT_ABSENT
- - $column-Node[CurrentStatus] = TRANSIENT_ABSENT
- - joinTargetNode($dependent, $dependent-Target, $dependent-Node)
- - joinTargetNode($column, $column-Target, $column-Node)
-- name: dependents removed before constraint
- from: dependents-Node
- kind: Precedence
- to: constraint-Node
- query:
- - $dependents[Type] = '*scpb.ConstraintComment'
- - $constraint[Type] IN ['*scpb.CheckConstraint', '*scpb.ColumnNotNull', '*scpb.ForeignKeyConstraint', '*scpb.UniqueWithoutIndexConstraint']
- - joinOnConstraintID($dependents, $constraint, $table-id, $constraint-id)
- - toAbsent($dependents-Target, $constraint-Target)
- - $dependents-Node[CurrentStatus] = ABSENT
- - $constraint-Node[CurrentStatus] = ABSENT
- - joinTargetNode($dependents, $dependents-Target, $dependents-Node)
- - joinTargetNode($constraint, $constraint-Target, $constraint-Node)
-- name: dependents removed before constraint
- from: dependents-Node
- kind: Precedence
- to: constraint-Node
- query:
- - $dependents[Type] = '*scpb.ConstraintComment'
- - $constraint[Type] IN ['*scpb.CheckConstraint', '*scpb.ColumnNotNull', '*scpb.ForeignKeyConstraint', '*scpb.UniqueWithoutIndexConstraint']
- - joinOnConstraintID($dependents, $constraint, $table-id, $constraint-id)
- - transient($dependents-Target, $constraint-Target)
- - $dependents-Node[CurrentStatus] = TRANSIENT_ABSENT
- - $constraint-Node[CurrentStatus] = TRANSIENT_ABSENT
- - joinTargetNode($dependents, $dependents-Target, $dependents-Node)
- - joinTargetNode($constraint, $constraint-Target, $constraint-Node)
-- name: dependents removed before constraint
- from: dependents-Node
- kind: Precedence
- to: constraint-Node
- query:
- - $dependents[Type] = '*scpb.ConstraintComment'
- - $constraint[Type] IN ['*scpb.CheckConstraint', '*scpb.ColumnNotNull', '*scpb.ForeignKeyConstraint', '*scpb.UniqueWithoutIndexConstraint']
- - joinOnConstraintID($dependents, $constraint, $table-id, $constraint-id)
- - $dependents-Target[TargetStatus] = TRANSIENT_ABSENT
- - $dependents-Node[CurrentStatus] = TRANSIENT_ABSENT
- - $constraint-Target[TargetStatus] = ABSENT
- - $constraint-Node[CurrentStatus] = ABSENT
- - joinTargetNode($dependents, $dependents-Target, $dependents-Node)
- - joinTargetNode($constraint, $constraint-Target, $constraint-Node)
-- name: dependents removed before constraint
- from: dependents-Node
- kind: Precedence
- to: constraint-Node
- query:
- - $dependents[Type] = '*scpb.ConstraintComment'
- - $constraint[Type] IN ['*scpb.CheckConstraint', '*scpb.ColumnNotNull', '*scpb.ForeignKeyConstraint', '*scpb.UniqueWithoutIndexConstraint']
- - joinOnConstraintID($dependents, $constraint, $table-id, $constraint-id)
- - $dependents-Target[TargetStatus] = ABSENT
- - $dependents-Node[CurrentStatus] = ABSENT
- - $constraint-Target[TargetStatus] = TRANSIENT_ABSENT
- - $constraint-Node[CurrentStatus] = TRANSIENT_ABSENT
- - joinTargetNode($dependents, $dependents-Target, $dependents-Node)
- - joinTargetNode($constraint, $constraint-Target, $constraint-Node)
-- name: dependents removed before index
- from: dependent-Node
- kind: Precedence
- to: index-Node
- query:
- - $dependent[Type] IN ['*scpb.IndexColumn', '*scpb.IndexComment', '*scpb.IndexName', '*scpb.IndexPartitioning', '*scpb.IndexZoneConfig', '*scpb.PartitionZoneConfig', '*scpb.SecondaryIndexPartial']
- - $index[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex', '*scpb.TemporaryIndex']
- - joinOnIndexID($dependent, $index, $table-id, $index-id)
- - toAbsent($dependent-Target, $index-Target)
- - $dependent-Node[CurrentStatus] = ABSENT
- - $index-Node[CurrentStatus] = ABSENT
- - joinTargetNode($dependent, $dependent-Target, $dependent-Node)
- - joinTargetNode($index, $index-Target, $index-Node)
-- name: dependents removed before index
- from: dependent-Node
- kind: Precedence
- to: index-Node
- query:
- - $dependent[Type] IN ['*scpb.IndexColumn', '*scpb.IndexComment', '*scpb.IndexName', '*scpb.IndexPartitioning', '*scpb.IndexZoneConfig', '*scpb.PartitionZoneConfig', '*scpb.SecondaryIndexPartial']
- - $index[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex', '*scpb.TemporaryIndex']
- - joinOnIndexID($dependent, $index, $table-id, $index-id)
- - transient($dependent-Target, $index-Target)
- - $dependent-Node[CurrentStatus] = TRANSIENT_ABSENT
- - $index-Node[CurrentStatus] = TRANSIENT_ABSENT
- - joinTargetNode($dependent, $dependent-Target, $dependent-Node)
- - joinTargetNode($index, $index-Target, $index-Node)
-- name: dependents removed before index
- from: dependent-Node
- kind: Precedence
- to: index-Node
- query:
- - $dependent[Type] IN ['*scpb.IndexColumn', '*scpb.IndexComment', '*scpb.IndexName', '*scpb.IndexPartitioning', '*scpb.IndexZoneConfig', '*scpb.PartitionZoneConfig', '*scpb.SecondaryIndexPartial']
- - $index[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex', '*scpb.TemporaryIndex']
- - joinOnIndexID($dependent, $index, $table-id, $index-id)
- - $dependent-Target[TargetStatus] = TRANSIENT_ABSENT
- - $dependent-Node[CurrentStatus] = TRANSIENT_ABSENT
- - $index-Target[TargetStatus] = ABSENT
- - $index-Node[CurrentStatus] = ABSENT
- - joinTargetNode($dependent, $dependent-Target, $dependent-Node)
- - joinTargetNode($index, $index-Target, $index-Node)
-- name: dependents removed before index
- from: dependent-Node
- kind: Precedence
- to: index-Node
- query:
- - $dependent[Type] IN ['*scpb.IndexColumn', '*scpb.IndexComment', '*scpb.IndexName', '*scpb.IndexPartitioning', '*scpb.IndexZoneConfig', '*scpb.PartitionZoneConfig', '*scpb.SecondaryIndexPartial']
- - $index[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex', '*scpb.TemporaryIndex']
- - joinOnIndexID($dependent, $index, $table-id, $index-id)
- - $dependent-Target[TargetStatus] = ABSENT
- - $dependent-Node[CurrentStatus] = ABSENT
- - $index-Target[TargetStatus] = TRANSIENT_ABSENT
- - $index-Node[CurrentStatus] = TRANSIENT_ABSENT
- - joinTargetNode($dependent, $dependent-Target, $dependent-Node)
- - joinTargetNode($index, $index-Target, $index-Node)
-- name: dependents removed right before simple constraint
- from: dependents-Node
- kind: SameStagePrecedence
- to: constraint-Node
- query:
- - $dependents[Type] = '*scpb.ConstraintComment'
- - $constraint[Type] IN ['*scpb.CheckConstraintUnvalidated', '*scpb.ForeignKeyConstraintUnvalidated', '*scpb.UniqueWithoutIndexConstraintUnvalidated']
- - joinOnConstraintID($dependents, $constraint, $table-id, $constraint-id)
- - toAbsent($dependents-Target, $constraint-Target)
- - $dependents-Node[CurrentStatus] = ABSENT
- - $constraint-Node[CurrentStatus] = ABSENT
- - joinTargetNode($dependents, $dependents-Target, $dependents-Node)
- - joinTargetNode($constraint, $constraint-Target, $constraint-Node)
-- name: dependents removed right before simple constraint
- from: dependents-Node
- kind: SameStagePrecedence
- to: constraint-Node
- query:
- - $dependents[Type] = '*scpb.ConstraintComment'
- - $constraint[Type] IN ['*scpb.CheckConstraintUnvalidated', '*scpb.ForeignKeyConstraintUnvalidated', '*scpb.UniqueWithoutIndexConstraintUnvalidated']
- - joinOnConstraintID($dependents, $constraint, $table-id, $constraint-id)
- - transient($dependents-Target, $constraint-Target)
- - $dependents-Node[CurrentStatus] = TRANSIENT_ABSENT
- - $constraint-Node[CurrentStatus] = TRANSIENT_ABSENT
- - joinTargetNode($dependents, $dependents-Target, $dependents-Node)
- - joinTargetNode($constraint, $constraint-Target, $constraint-Node)
-- name: dependents removed right before simple constraint
- from: dependents-Node
- kind: SameStagePrecedence
- to: constraint-Node
- query:
- - $dependents[Type] = '*scpb.ConstraintComment'
- - $constraint[Type] IN ['*scpb.CheckConstraintUnvalidated', '*scpb.ForeignKeyConstraintUnvalidated', '*scpb.UniqueWithoutIndexConstraintUnvalidated']
- - joinOnConstraintID($dependents, $constraint, $table-id, $constraint-id)
- - $dependents-Target[TargetStatus] = TRANSIENT_ABSENT
- - $dependents-Node[CurrentStatus] = TRANSIENT_ABSENT
- - $constraint-Target[TargetStatus] = ABSENT
- - $constraint-Node[CurrentStatus] = ABSENT
- - joinTargetNode($dependents, $dependents-Target, $dependents-Node)
- - joinTargetNode($constraint, $constraint-Target, $constraint-Node)
-- name: dependents removed right before simple constraint
- from: dependents-Node
- kind: SameStagePrecedence
- to: constraint-Node
- query:
- - $dependents[Type] = '*scpb.ConstraintComment'
- - $constraint[Type] IN ['*scpb.CheckConstraintUnvalidated', '*scpb.ForeignKeyConstraintUnvalidated', '*scpb.UniqueWithoutIndexConstraintUnvalidated']
- - joinOnConstraintID($dependents, $constraint, $table-id, $constraint-id)
- - $dependents-Target[TargetStatus] = ABSENT
- - $dependents-Node[CurrentStatus] = ABSENT
- - $constraint-Target[TargetStatus] = TRANSIENT_ABSENT
- - $constraint-Node[CurrentStatus] = TRANSIENT_ABSENT
- - joinTargetNode($dependents, $dependents-Target, $dependents-Node)
- - joinTargetNode($constraint, $constraint-Target, $constraint-Node)
-- name: descriptor drop right before removing dependent between types
- from: referenced-descriptor-Node
- kind: SameStagePrecedence
- to: referencing-via-type-Node
- query:
- - $referenced-descriptor[Type] IN ['*scpb.AliasType', '*scpb.CompositeType', '*scpb.EnumType']
- - $referenced-descriptor[DescID] = $fromDescID
- - $referencing-via-type[ReferencedTypeIDs] CONTAINS $fromDescID
- - $referencing-via-type[Type] = '*scpb.ColumnType'
- - toAbsent($referenced-descriptor-Target, $referencing-via-type-Target)
- - $referenced-descriptor-Node[CurrentStatus] = DROPPED
- - $referencing-via-type-Node[CurrentStatus] = ABSENT
- - joinTargetNode($referenced-descriptor, $referenced-descriptor-Target, $referenced-descriptor-Node)
- - joinTargetNode($referencing-via-type, $referencing-via-type-Target, $referencing-via-type-Node)
-- name: descriptor drop right before removing dependent with attr ref
- from: referenced-descriptor-Node
- kind: SameStagePrecedence
- to: referencing-via-attr-Node
- query:
- - $referenced-descriptor[Type] IN ['*scpb.AliasType', '*scpb.CompositeType', '*scpb.Database', '*scpb.EnumType', '*scpb.Function', '*scpb.Schema', '*scpb.Sequence', '*scpb.Table', '*scpb.View']
- - $referencing-via-attr[Type] IN ['*scpb.CheckConstraintUnvalidated', '*scpb.ColumnComment', '*scpb.ColumnComputeExpression', '*scpb.ColumnDefaultExpression', '*scpb.ColumnFamily', '*scpb.ColumnName', '*scpb.ColumnOnUpdateExpression', '*scpb.ColumnType', '*scpb.CompositeTypeAttrName', '*scpb.CompositeTypeAttrType', '*scpb.ConstraintComment', '*scpb.ConstraintWithoutIndexName', '*scpb.DatabaseComment', '*scpb.DatabaseRegionConfig', '*scpb.DatabaseRoleSetting', '*scpb.DatabaseZoneConfig', '*scpb.EnumTypeValue', '*scpb.ForeignKeyConstraintUnvalidated', '*scpb.FunctionBody', '*scpb.FunctionLeakProof', '*scpb.FunctionName', '*scpb.FunctionNullInputBehavior', '*scpb.FunctionSecurity', '*scpb.FunctionVolatility', '*scpb.IndexColumn', '*scpb.IndexComment', '*scpb.IndexName', '*scpb.IndexPartitioning', '*scpb.IndexZoneConfig', '*scpb.LDRJobIDs', '*scpb.Namespace', '*scpb.Owner', '*scpb.PartitionZoneConfig', '*scpb.RowLevelTTL', '*scpb.SchemaComment', '*scpb.SecondaryIndexPartial', '*scpb.SequenceOption', '*scpb.SequenceOwner', '*scpb.TableComment', '*scpb.TableLocalityGlobal', '*scpb.TableLocalityPrimaryRegion', '*scpb.TableLocalityRegionalByRow', '*scpb.TableLocalitySecondaryRegion', '*scpb.TablePartitioning', '*scpb.TableSchemaLocked', '*scpb.TableZoneConfig', '*scpb.Trigger', '*scpb.TriggerDeps', '*scpb.TriggerEnabled', '*scpb.TriggerEvents', '*scpb.TriggerFunctionCall', '*scpb.TriggerName', '*scpb.TriggerTiming', '*scpb.TriggerTransition', '*scpb.TriggerWhen', '*scpb.TypeComment', '*scpb.UniqueWithoutIndexConstraintUnvalidated', '*scpb.UserPrivileges']
- - joinReferencedDescID($referencing-via-attr, $referenced-descriptor, $desc-id)
- - toAbsent($referenced-descriptor-Target, $referencing-via-attr-Target)
- - $referenced-descriptor-Node[CurrentStatus] = DROPPED
- - $referencing-via-attr-Node[CurrentStatus] = ABSENT
- - joinTargetNode($referenced-descriptor, $referenced-descriptor-Target, $referenced-descriptor-Node)
- - joinTargetNode($referencing-via-attr, $referencing-via-attr-Target, $referencing-via-attr-Node)
-- name: descriptor drop right before removing dependent with expr ref to sequence
- from: referenced-descriptor-Node
- kind: SameStagePrecedence
- to: referencing-via-expr-Node
- query:
- - $referenced-descriptor[Type] = '*scpb.Sequence'
- - $referenced-descriptor[DescID] = $seqID
- - $referencing-via-expr[ReferencedSequenceIDs] CONTAINS $seqID
- - $referencing-via-expr[Type] IN ['*scpb.CheckConstraintUnvalidated', '*scpb.ColumnComputeExpression', '*scpb.ColumnDefaultExpression', '*scpb.ColumnOnUpdateExpression', '*scpb.ColumnType', '*scpb.SecondaryIndexPartial']
- - toAbsent($referenced-descriptor-Target, $referencing-via-expr-Target)
- - $referenced-descriptor-Node[CurrentStatus] = DROPPED
- - $referencing-via-expr-Node[CurrentStatus] = ABSENT
- - joinTargetNode($referenced-descriptor, $referenced-descriptor-Target, $referenced-descriptor-Node)
- - joinTargetNode($referencing-via-expr, $referencing-via-expr-Target, $referencing-via-expr-Node)
-- name: descriptor drop right before removing dependent with function refs in columns
- from: referenced-descriptor-Node
- kind: SameStagePrecedence
- to: referencing-via-function-Node
- query:
- - $referenced-descriptor[Type] = '*scpb.Function'
- - $referenced-descriptor[DescID] = $fromDescID
- - $referencing-via-function[ReferencedFunctionIDs] CONTAINS $fromDescID
- - $referencing-via-function[Type] IN ['*scpb.CheckConstraintUnvalidated', '*scpb.ColumnComputeExpression', '*scpb.ColumnDefaultExpression', '*scpb.ColumnOnUpdateExpression', '*scpb.ColumnType', '*scpb.SecondaryIndexPartial']
- - toAbsent($referenced-descriptor-Target, $referencing-via-function-Target)
- - $referenced-descriptor-Node[CurrentStatus] = DROPPED
- - $referencing-via-function-Node[CurrentStatus] = ABSENT
- - joinTargetNode($referenced-descriptor, $referenced-descriptor-Target, $referenced-descriptor-Node)
- - joinTargetNode($referencing-via-function, $referencing-via-function-Target, $referencing-via-function-Node)
-- name: descriptor drop right before removing dependent with type refs in expressions
- from: referenced-descriptor-Node
- kind: SameStagePrecedence
- to: referencing-via-type-Node
- query:
- - $referenced-descriptor[Type] IN ['*scpb.AliasType', '*scpb.CompositeType', '*scpb.EnumType']
- - $referenced-descriptor[DescID] = $fromDescID
- - $referencing-via-type[ReferencedTypeIDs] CONTAINS $fromDescID
- - descriptorIsNotBeingDropped-24.3($referencing-via-type)
- - $referencing-via-type[Type] IN ['*scpb.CheckConstraintUnvalidated', '*scpb.ColumnComputeExpression', '*scpb.ColumnDefaultExpression', '*scpb.ColumnOnUpdateExpression', '*scpb.ColumnType', '*scpb.SecondaryIndexPartial']
- - toAbsent($referenced-descriptor-Target, $referencing-via-type-Target)
- - $referenced-descriptor-Node[CurrentStatus] = DROPPED
- - $referencing-via-type-Node[CurrentStatus] = ABSENT
- - joinTargetNode($referenced-descriptor, $referenced-descriptor-Target, $referenced-descriptor-Node)
- - joinTargetNode($referencing-via-type, $referencing-via-type-Target, $referencing-via-type-Node)
-- name: descriptor dropped before dependent element removal
- from: descriptor-Node
- kind: Precedence
- to: dependent-Node
- query:
- - $descriptor[Type] IN ['*scpb.AliasType', '*scpb.CompositeType', '*scpb.Database', '*scpb.EnumType', '*scpb.Function', '*scpb.Schema', '*scpb.Sequence', '*scpb.Table', '*scpb.View']
- - $dependent[Type] IN ['*scpb.CheckConstraintUnvalidated', '*scpb.ColumnComment', '*scpb.ColumnComputeExpression', '*scpb.ColumnDefaultExpression', '*scpb.ColumnFamily', '*scpb.ColumnName', '*scpb.ColumnOnUpdateExpression', '*scpb.ColumnType', '*scpb.CompositeTypeAttrName', '*scpb.CompositeTypeAttrType', '*scpb.DatabaseComment', '*scpb.DatabaseRegionConfig', '*scpb.DatabaseRoleSetting', '*scpb.DatabaseZoneConfig', '*scpb.EnumTypeValue', '*scpb.ForeignKeyConstraintUnvalidated', '*scpb.FunctionBody', '*scpb.FunctionLeakProof', '*scpb.FunctionName', '*scpb.FunctionNullInputBehavior', '*scpb.FunctionSecurity', '*scpb.FunctionVolatility', '*scpb.IndexColumn', '*scpb.IndexComment', '*scpb.IndexName', '*scpb.IndexPartitioning', '*scpb.IndexZoneConfig', '*scpb.LDRJobIDs', '*scpb.Namespace', '*scpb.Owner', '*scpb.PartitionZoneConfig', '*scpb.RowLevelTTL', '*scpb.SchemaChild', '*scpb.SchemaComment', '*scpb.SchemaParent', '*scpb.SecondaryIndexPartial', '*scpb.SequenceOption', '*scpb.SequenceOwner', '*scpb.TableComment', '*scpb.TableLocalityGlobal', '*scpb.TableLocalityPrimaryRegion', '*scpb.TableLocalityRegionalByRow', '*scpb.TableLocalitySecondaryRegion', '*scpb.TablePartitioning', '*scpb.TableSchemaLocked', '*scpb.TableZoneConfig', '*scpb.Trigger', '*scpb.TriggerDeps', '*scpb.TriggerEnabled', '*scpb.TriggerEvents', '*scpb.TriggerFunctionCall', '*scpb.TriggerName', '*scpb.TriggerTiming', '*scpb.TriggerTransition', '*scpb.TriggerWhen', '*scpb.TypeComment', '*scpb.UniqueWithoutIndexConstraintUnvalidated', '*scpb.UserPrivileges']
- - joinOnDescID($descriptor, $dependent, $desc-id)
- - toAbsent($descriptor-Target, $dependent-Target)
- - $descriptor-Node[CurrentStatus] = DROPPED
- - $dependent-Node[CurrentStatus] = ABSENT
- - joinTargetNode($descriptor, $descriptor-Target, $descriptor-Node)
- - joinTargetNode($dependent, $dependent-Target, $dependent-Node)
-- name: descriptor dropped in transaction before removal
- from: dropped-Node
- kind: PreviousTransactionPrecedence
- to: absent-Node
- query:
- - $dropped[Type] IN ['*scpb.AliasType', '*scpb.CompositeType', '*scpb.Database', '*scpb.EnumType', '*scpb.Function', '*scpb.Schema', '*scpb.Sequence', '*scpb.Table', '*scpb.View']
- - $dropped[DescID] = $_
- - $dropped[Self] = $absent
- - toAbsent($dropped-Target, $absent-Target)
- - $dropped-Node[CurrentStatus] = DROPPED
- - $absent-Node[CurrentStatus] = ABSENT
- - joinTargetNode($dropped, $dropped-Target, $dropped-Node)
- - joinTargetNode($absent, $absent-Target, $absent-Node)
-- name: descriptor dropped right before removing back-reference in its parent descriptor
- from: descriptor-Node
- kind: SameStagePrecedence
- to: back-reference-in-parent-descriptor-Node
- query:
- - $descriptor[Type] IN ['*scpb.AliasType', '*scpb.CompositeType', '*scpb.Database', '*scpb.EnumType', '*scpb.Function', '*scpb.Schema', '*scpb.Sequence', '*scpb.Table', '*scpb.View']
- - $back-reference-in-parent-descriptor[Type] IN ['*scpb.SchemaChild', '*scpb.SchemaParent']
- - joinOnDescID($descriptor, $back-reference-in-parent-descriptor, $desc-id)
- - toAbsent($descriptor-Target, $back-reference-in-parent-descriptor-Target)
- - $descriptor-Node[CurrentStatus] = DROPPED
- - $back-reference-in-parent-descriptor-Node[CurrentStatus] = ABSENT
- - joinTargetNode($descriptor, $descriptor-Target, $descriptor-Node)
- - joinTargetNode($back-reference-in-parent-descriptor, $back-reference-in-parent-descriptor-Target, $back-reference-in-parent-descriptor-Node)
-- name: descriptor existence precedes dependents
- from: relation-Node
- kind: Precedence
- to: dependent-Node
- query:
- - $relation[Type] IN ['*scpb.AliasType', '*scpb.CompositeType', '*scpb.Database', '*scpb.EnumType', '*scpb.Function', '*scpb.Schema', '*scpb.Sequence', '*scpb.Table', '*scpb.View']
- - $dependent[Type] IN ['*scpb.CheckConstraint', '*scpb.CheckConstraintUnvalidated', '*scpb.Column', '*scpb.ColumnComment', '*scpb.ColumnComputeExpression', '*scpb.ColumnDefaultExpression', '*scpb.ColumnFamily', '*scpb.ColumnName', '*scpb.ColumnNotNull', '*scpb.ColumnOnUpdateExpression', '*scpb.ColumnType', '*scpb.CompositeTypeAttrName', '*scpb.CompositeTypeAttrType', '*scpb.ConstraintComment', '*scpb.ConstraintWithoutIndexName', '*scpb.DatabaseComment', '*scpb.DatabaseData', '*scpb.DatabaseRegionConfig', '*scpb.DatabaseRoleSetting', '*scpb.DatabaseZoneConfig', '*scpb.EnumTypeValue', '*scpb.ForeignKeyConstraint', '*scpb.ForeignKeyConstraintUnvalidated', '*scpb.FunctionBody', '*scpb.FunctionLeakProof', '*scpb.FunctionName', '*scpb.FunctionNullInputBehavior', '*scpb.FunctionSecurity', '*scpb.FunctionVolatility', '*scpb.IndexColumn', '*scpb.IndexComment', '*scpb.IndexData', '*scpb.IndexName', '*scpb.IndexPartitioning', '*scpb.IndexZoneConfig', '*scpb.LDRJobIDs', '*scpb.Namespace', '*scpb.Owner', '*scpb.PartitionZoneConfig', '*scpb.PrimaryIndex', '*scpb.RowLevelTTL', '*scpb.SchemaChild', '*scpb.SchemaComment', '*scpb.SchemaParent', '*scpb.SecondaryIndex', '*scpb.SecondaryIndexPartial', '*scpb.SequenceOption', '*scpb.SequenceOwner', '*scpb.TableComment', '*scpb.TableData', '*scpb.TableLocalityGlobal', '*scpb.TableLocalityPrimaryRegion', '*scpb.TableLocalityRegionalByRow', '*scpb.TableLocalitySecondaryRegion', '*scpb.TablePartitioning', '*scpb.TableSchemaLocked', '*scpb.TableZoneConfig', '*scpb.TemporaryIndex', '*scpb.Trigger', '*scpb.TriggerDeps', '*scpb.TriggerEnabled', '*scpb.TriggerEvents', '*scpb.TriggerFunctionCall', '*scpb.TriggerName', '*scpb.TriggerTiming', '*scpb.TriggerTransition', '*scpb.TriggerWhen', '*scpb.TypeComment', '*scpb.UniqueWithoutIndexConstraint', '*scpb.UniqueWithoutIndexConstraintUnvalidated', '*scpb.UserPrivileges']
- - joinOnDescID($relation, $dependent, $relation-id)
- - ToPublicOrTransient($relation-Target, $dependent-Target)
- - $relation-Node[CurrentStatus] = DESCRIPTOR_ADDED
- - $dependent-Node[CurrentStatus] = PUBLIC
- - joinTargetNode($relation, $relation-Target, $relation-Node)
- - joinTargetNode($dependent, $dependent-Target, $dependent-Node)
-- name: descriptor removed right before garbage collection
- from: database-Node
- kind: SameStagePrecedence
- to: data-Node
- query:
- - $database[Type] IN ['*scpb.AliasType', '*scpb.CompositeType', '*scpb.Database', '*scpb.EnumType', '*scpb.Function', '*scpb.Schema', '*scpb.Sequence', '*scpb.Table', '*scpb.View']
- - $data[Type] = '*scpb.DatabaseData'
- - joinOnDescID($database, $data, $db-id)
- - toAbsent($database-Target, $data-Target)
- - $database-Node[CurrentStatus] = ABSENT
- - $data-Node[CurrentStatus] = DROPPED
- - joinTargetNode($database, $database-Target, $database-Node)
- - joinTargetNode($data, $data-Target, $data-Node)
-- name: ensure columns are in increasing order
- from: later-column-Node
- kind: Precedence
- to: earlier-column-Node
- query:
- - $later-column[Type] = '*scpb.Column'
- - joinTargetNode($later-column, $later-column-Target, $later-column-Node)
- - $earlier-column[Type] = '*scpb.Column'
- - joinOnDescID($later-column, $earlier-column, $table-id)
- - ToPublicOrTransient($later-column-Target, $earlier-column-Target)
- - $status IN [WRITE_ONLY, PUBLIC]
- - $later-column-Node[CurrentStatus] = $status
- - $earlier-column-Node[CurrentStatus] = $status
- - SmallerColumnIDFirst(*scpb.Column, *scpb.Column)($later-column, $earlier-column)
- - joinTargetNode($later-column, $later-column-Target, $later-column-Node)
- - joinTargetNode($earlier-column, $earlier-column-Target, $earlier-column-Node)
-- name: function name should be set before parent ids
- from: function-name-Node
- kind: Precedence
- to: function-parent-Node
- query:
- - $function-name[Type] = '*scpb.FunctionName'
- - $function-parent[Type] = '*scpb.SchemaChild'
- - joinOnDescID($function-name, $function-parent, $function-id)
- - ToPublicOrTransient($function-name-Target, $function-parent-Target)
- - $function-name-Node[CurrentStatus] = PUBLIC
- - $function-parent-Node[CurrentStatus] = PUBLIC
- - joinTargetNode($function-name, $function-name-Target, $function-name-Node)
- - joinTargetNode($function-parent, $function-parent-Target, $function-parent-Node)
-- name: index data exists as soon as index accepts backfills
- from: index-name-Node
- kind: SameStagePrecedence
- to: index-Node
- query:
- - $index-name[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex']
- - $index[Type] = '*scpb.IndexData'
- - joinOnIndexID($index-name, $index, $table-id, $index-id)
- - ToPublicOrTransient($index-name-Target, $index-Target)
- - $index-name-Node[CurrentStatus] = BACKFILL_ONLY
- - $index-Node[CurrentStatus] = PUBLIC
- - joinTargetNode($index-name, $index-name-Target, $index-name-Node)
- - joinTargetNode($index, $index-Target, $index-Node)
-- name: index dependents exist before index becomes public
- from: dependent-Node
- kind: Precedence
- to: index-Node
- query:
- - $dependent[Type] IN ['*scpb.IndexColumn', '*scpb.IndexComment', '*scpb.IndexName', '*scpb.IndexPartitioning', '*scpb.IndexZoneConfig', '*scpb.PartitionZoneConfig', '*scpb.SecondaryIndexPartial']
- - $index[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex', '*scpb.TemporaryIndex']
- - joinOnIndexID($dependent, $index, $table-id, $index-id)
- - ToPublicOrTransient($dependent-Target, $index-Target)
- - $dependent-Node[CurrentStatus] = PUBLIC
- - $index-Node[CurrentStatus] = PUBLIC
- - joinTargetNode($dependent, $dependent-Target, $dependent-Node)
- - joinTargetNode($index, $index-Target, $index-Node)
-- name: index drop mutation visible before cleaning up index columns
- from: index-Node
- kind: Precedence
- to: dependent-Node
- query:
- - $index[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex', '*scpb.TemporaryIndex']
- - $dependent[Type] = '*scpb.IndexColumn'
- - joinOnIndexID($index, $dependent, $table-id, $index-id)
- - toAbsent($index-Target, $dependent-Target)
- - $index-Node[CurrentStatus] = DELETE_ONLY
- - $dependent-Node[CurrentStatus] = ABSENT
- - joinTargetNode($index, $index-Target, $index-Node)
- - joinTargetNode($dependent, $dependent-Target, $dependent-Node)
-- name: index drop mutation visible before cleaning up index columns
- from: index-Node
- kind: Precedence
- to: dependent-Node
- query:
- - $index[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex', '*scpb.TemporaryIndex']
- - $dependent[Type] = '*scpb.IndexColumn'
- - joinOnIndexID($index, $dependent, $table-id, $index-id)
- - transient($index-Target, $dependent-Target)
- - $index-Node[CurrentStatus] = TRANSIENT_DELETE_ONLY
- - $dependent-Node[CurrentStatus] = TRANSIENT_ABSENT
- - joinTargetNode($index, $index-Target, $index-Node)
- - joinTargetNode($dependent, $dependent-Target, $dependent-Node)
-- name: index drop mutation visible before cleaning up index columns
- from: index-Node
- kind: Precedence
- to: dependent-Node
- query:
- - $index[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex', '*scpb.TemporaryIndex']
- - $dependent[Type] = '*scpb.IndexColumn'
- - joinOnIndexID($index, $dependent, $table-id, $index-id)
- - $index-Target[TargetStatus] = TRANSIENT_ABSENT
- - $index-Node[CurrentStatus] = TRANSIENT_DELETE_ONLY
- - $dependent-Target[TargetStatus] = ABSENT
- - $dependent-Node[CurrentStatus] = ABSENT
- - joinTargetNode($index, $index-Target, $index-Node)
- - joinTargetNode($dependent, $dependent-Target, $dependent-Node)
-- name: index drop mutation visible before cleaning up index columns
- from: index-Node
- kind: Precedence
- to: dependent-Node
- query:
- - $index[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex', '*scpb.TemporaryIndex']
- - $dependent[Type] = '*scpb.IndexColumn'
- - joinOnIndexID($index, $dependent, $table-id, $index-id)
- - $index-Target[TargetStatus] = ABSENT
- - $index-Node[CurrentStatus] = DELETE_ONLY
- - $dependent-Target[TargetStatus] = TRANSIENT_ABSENT
- - $dependent-Node[CurrentStatus] = TRANSIENT_ABSENT
- - joinTargetNode($index, $index-Target, $index-Node)
- - joinTargetNode($dependent, $dependent-Target, $dependent-Node)
-- name: index existence precedes index dependents
- from: index-Node
- kind: Precedence
- to: dependent-Node
- query:
- - $index[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex']
- - $dependent[Type] IN ['*scpb.IndexColumn', '*scpb.IndexComment', '*scpb.IndexName', '*scpb.IndexPartitioning', '*scpb.IndexZoneConfig', '*scpb.PartitionZoneConfig', '*scpb.SecondaryIndexPartial']
- - joinOnIndexID($index, $dependent, $table-id, $index-id)
- - ToPublicOrTransient($index-Target, $dependent-Target)
- - $index-Node[CurrentStatus] = BACKFILL_ONLY
- - $dependent-Node[CurrentStatus] = PUBLIC
- - joinTargetNode($index, $index-Target, $index-Node)
- - joinTargetNode($dependent, $dependent-Target, $dependent-Node)
-- name: index is MERGED before its temp index starts to disappear
- from: index-Node
- kind: Precedence
- to: temp-Node
- query:
- - $index[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex']
- - $temp[Type] = '*scpb.TemporaryIndex'
- - joinOnDescID($index, $temp, $table-id)
- - $index[TemporaryIndexID] = $temp-index-id
- - $temp[IndexID] = $temp-index-id
- - $index-Target[TargetStatus] IN [PUBLIC, TRANSIENT_ABSENT]
- - $index-Node[CurrentStatus] = MERGED
- - $temp-Target[TargetStatus] = TRANSIENT_ABSENT
- - $temp-Node[CurrentStatus] = TRANSIENT_DELETE_ONLY
- - joinTargetNode($index, $index-Target, $index-Node)
- - joinTargetNode($temp, $temp-Target, $temp-Node)
-- name: index is ready to be validated before we validate constraint on it
- from: index-Node
- kind: Precedence
- to: constraint-Node
- query:
- - $index[Type] = '*scpb.PrimaryIndex'
- - $constraint[Type] IN ['*scpb.CheckConstraint', '*scpb.ColumnNotNull', '*scpb.ForeignKeyConstraint', '*scpb.UniqueWithoutIndexConstraint']
- - joinOnDescID($index, $constraint, $table-id)
- - $index[IndexID] = $index-id-for-validation
- - $constraint[IndexID] = $index-id-for-validation
- - ToPublicOrTransient($index-Target, $constraint-Target)
- - $index-Node[CurrentStatus] = VALIDATED
- - $constraint-Node[CurrentStatus] = VALIDATED
- - joinTargetNode($index, $index-Target, $index-Node)
- - joinTargetNode($constraint, $constraint-Target, $constraint-Node)
-- name: index no longer public before dependents, excluding columns
- from: index-Node
- kind: Precedence
- to: dependent-Node
- query:
- - $index[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex', '*scpb.TemporaryIndex']
- - $dependent[Type] IN ['*scpb.IndexComment', '*scpb.IndexName', '*scpb.IndexPartitioning', '*scpb.IndexZoneConfig', '*scpb.PartitionZoneConfig', '*scpb.SecondaryIndexPartial']
- - joinOnIndexID($index, $dependent, $table-id, $index-id)
- - toAbsent($index-Target, $dependent-Target)
- - $index-Node[CurrentStatus] = VALIDATED
- - $dependent-Node[CurrentStatus] = ABSENT
- - joinTargetNode($index, $index-Target, $index-Node)
- - joinTargetNode($dependent, $dependent-Target, $dependent-Node)
-- name: index no longer public before dependents, excluding columns
- from: index-Node
- kind: Precedence
- to: dependent-Node
- query:
- - $index[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex', '*scpb.TemporaryIndex']
- - $dependent[Type] IN ['*scpb.IndexComment', '*scpb.IndexName', '*scpb.IndexPartitioning', '*scpb.IndexZoneConfig', '*scpb.PartitionZoneConfig', '*scpb.SecondaryIndexPartial']
- - joinOnIndexID($index, $dependent, $table-id, $index-id)
- - transient($index-Target, $dependent-Target)
- - $index-Node[CurrentStatus] = TRANSIENT_VALIDATED
- - $dependent-Node[CurrentStatus] = TRANSIENT_ABSENT
- - joinTargetNode($index, $index-Target, $index-Node)
- - joinTargetNode($dependent, $dependent-Target, $dependent-Node)
-- name: index no longer public before dependents, excluding columns
- from: index-Node
- kind: Precedence
- to: dependent-Node
- query:
- - $index[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex', '*scpb.TemporaryIndex']
- - $dependent[Type] IN ['*scpb.IndexComment', '*scpb.IndexName', '*scpb.IndexPartitioning', '*scpb.IndexZoneConfig', '*scpb.PartitionZoneConfig', '*scpb.SecondaryIndexPartial']
- - joinOnIndexID($index, $dependent, $table-id, $index-id)
- - $index-Target[TargetStatus] = TRANSIENT_ABSENT
- - $index-Node[CurrentStatus] = TRANSIENT_VALIDATED
- - $dependent-Target[TargetStatus] = ABSENT
- - $dependent-Node[CurrentStatus] = ABSENT
- - joinTargetNode($index, $index-Target, $index-Node)
- - joinTargetNode($dependent, $dependent-Target, $dependent-Node)
-- name: index no longer public before dependents, excluding columns
- from: index-Node
- kind: Precedence
- to: dependent-Node
- query:
- - $index[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex', '*scpb.TemporaryIndex']
- - $dependent[Type] IN ['*scpb.IndexComment', '*scpb.IndexName', '*scpb.IndexPartitioning', '*scpb.IndexZoneConfig', '*scpb.PartitionZoneConfig', '*scpb.SecondaryIndexPartial']
- - joinOnIndexID($index, $dependent, $table-id, $index-id)
- - $index-Target[TargetStatus] = ABSENT
- - $index-Node[CurrentStatus] = VALIDATED
- - $dependent-Target[TargetStatus] = TRANSIENT_ABSENT
- - $dependent-Node[CurrentStatus] = TRANSIENT_ABSENT
- - joinTargetNode($index, $index-Target, $index-Node)
- - joinTargetNode($dependent, $dependent-Target, $dependent-Node)
-- name: index no longer public before index name
- from: index-Node
- kind: Precedence
- to: name-Node
- query:
- - $index[Type] = '*scpb.SecondaryIndex'
- - $name[Type] = '*scpb.IndexName'
- - joinOnIndexID($index, $name, $table-id, $index-id)
- - toAbsent($index-Target, $name-Target)
- - $index-Node[CurrentStatus] = DELETE_ONLY
- - $name-Node[CurrentStatus] = ABSENT
- - joinTargetNode($index, $index-Target, $index-Node)
- - joinTargetNode($name, $name-Target, $name-Node)
-- name: index no longer public before index name
- from: index-Node
- kind: Precedence
- to: name-Node
- query:
- - $index[Type] = '*scpb.SecondaryIndex'
- - $name[Type] = '*scpb.IndexName'
- - joinOnIndexID($index, $name, $table-id, $index-id)
- - transient($index-Target, $name-Target)
- - $index-Node[CurrentStatus] = TRANSIENT_DELETE_ONLY
- - $name-Node[CurrentStatus] = TRANSIENT_ABSENT
- - joinTargetNode($index, $index-Target, $index-Node)
- - joinTargetNode($name, $name-Target, $name-Node)
-- name: index no longer public before index name
- from: index-Node
- kind: Precedence
- to: name-Node
- query:
- - $index[Type] = '*scpb.SecondaryIndex'
- - $name[Type] = '*scpb.IndexName'
- - joinOnIndexID($index, $name, $table-id, $index-id)
- - $index-Target[TargetStatus] = TRANSIENT_ABSENT
- - $index-Node[CurrentStatus] = TRANSIENT_DELETE_ONLY
- - $name-Target[TargetStatus] = ABSENT
- - $name-Node[CurrentStatus] = ABSENT
- - joinTargetNode($index, $index-Target, $index-Node)
- - joinTargetNode($name, $name-Target, $name-Node)
-- name: index no longer public before index name
- from: index-Node
- kind: Precedence
- to: name-Node
- query:
- - $index[Type] = '*scpb.SecondaryIndex'
- - $name[Type] = '*scpb.IndexName'
- - joinOnIndexID($index, $name, $table-id, $index-id)
- - $index-Target[TargetStatus] = ABSENT
- - $index-Node[CurrentStatus] = DELETE_ONLY
- - $name-Target[TargetStatus] = TRANSIENT_ABSENT
- - $name-Node[CurrentStatus] = TRANSIENT_ABSENT
- - joinTargetNode($index, $index-Target, $index-Node)
- - joinTargetNode($name, $name-Target, $name-Node)
-- name: index removed before garbage collection
- from: index-Node
- kind: Precedence
- to: index-data-Node
- query:
- - $index[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex', '*scpb.TemporaryIndex']
- - $index-data[Type] = '*scpb.IndexData'
- - joinOnIndexID($index, $index-data, $table-id, $index-id)
- - toAbsent($index-Target, $index-data-Target)
- - $index-Node[CurrentStatus] = ABSENT
- - $index-data-Node[CurrentStatus] = DROPPED
- - joinTargetNode($index, $index-Target, $index-Node)
- - joinTargetNode($index-data, $index-data-Target, $index-data-Node)
-- name: index removed before garbage collection
- from: index-Node
- kind: Precedence
- to: index-data-Node
- query:
- - $index[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex', '*scpb.TemporaryIndex']
- - $index-data[Type] = '*scpb.IndexData'
- - joinOnIndexID($index, $index-data, $table-id, $index-id)
- - transient($index-Target, $index-data-Target)
- - $index-Node[CurrentStatus] = TRANSIENT_ABSENT
- - $index-data-Node[CurrentStatus] = TRANSIENT_DROPPED
- - joinTargetNode($index, $index-Target, $index-Node)
- - joinTargetNode($index-data, $index-data-Target, $index-data-Node)
-- name: index removed before garbage collection
- from: index-Node
- kind: Precedence
- to: index-data-Node
- query:
- - $index[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex', '*scpb.TemporaryIndex']
- - $index-data[Type] = '*scpb.IndexData'
- - joinOnIndexID($index, $index-data, $table-id, $index-id)
- - $index-Target[TargetStatus] = TRANSIENT_ABSENT
- - $index-Node[CurrentStatus] = TRANSIENT_ABSENT
- - $index-data-Target[TargetStatus] = ABSENT
- - $index-data-Node[CurrentStatus] = DROPPED
- - joinTargetNode($index, $index-Target, $index-Node)
- - joinTargetNode($index-data, $index-data-Target, $index-data-Node)
-- name: index removed before garbage collection
- from: index-Node
- kind: Precedence
- to: index-data-Node
- query:
- - $index[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex', '*scpb.TemporaryIndex']
- - $index-data[Type] = '*scpb.IndexData'
- - joinOnIndexID($index, $index-data, $table-id, $index-id)
- - $index-Target[TargetStatus] = ABSENT
- - $index-Node[CurrentStatus] = ABSENT
- - $index-data-Target[TargetStatus] = TRANSIENT_ABSENT
- - $index-data-Node[CurrentStatus] = TRANSIENT_DROPPED
- - joinTargetNode($index, $index-Target, $index-Node)
- - joinTargetNode($index-data, $index-data-Target, $index-data-Node)
-- name: index-column added to index before index is backfilled
- from: index-column-Node
- kind: Precedence
- to: index-Node
- query:
- - $index-column[Type] = '*scpb.IndexColumn'
- - $index[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex']
- - joinOnIndexID($index-column, $index, $table-id, $index-id)
- - ToPublicOrTransient($index-column-Target, $index-Target)
- - $index-column-Node[CurrentStatus] = PUBLIC
- - $index-Node[CurrentStatus] = BACKFILLED
- - joinTargetNode($index-column, $index-column-Target, $index-column-Node)
- - joinTargetNode($index, $index-Target, $index-Node)
-- name: index-column added to index before temp index receives writes
- from: index-column-Node
- kind: Precedence
- to: index-Node
- query:
- - $index-column[Type] = '*scpb.IndexColumn'
- - $index[Type] = '*scpb.TemporaryIndex'
- - joinOnIndexID($index-column, $index, $table-id, $index-id)
- - transient($index-column-Target, $index-Target)
- - $index-column-Node[CurrentStatus] = PUBLIC
- - $index-Node[CurrentStatus] = WRITE_ONLY
- - joinTargetNode($index-column, $index-column-Target, $index-column-Node)
- - joinTargetNode($index, $index-Target, $index-Node)
-- name: indexes containing column reach absent before column
- from: index-Node
- kind: Precedence
- to: column-Node
- query:
- - $index[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex']
- - $column[Type] = '*scpb.Column'
- - ColumnInIndex($index-column, $index, $table-id, $column-id, $index-id)
- - joinOnColumnID($index-column, $column, $table-id, $column-id)
- - descriptorIsNotBeingDropped-24.3($index-column)
- - toAbsent($index-Target, $column-Target)
- - $index-Node[CurrentStatus] = ABSENT
- - $column-Node[CurrentStatus] = ABSENT
- - joinTargetNode($index, $index-Target, $index-Node)
- - joinTargetNode($column, $column-Target, $column-Node)
-- name: indexes containing column reach absent before column
- from: index-Node
- kind: Precedence
- to: column-Node
- query:
- - $index[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex']
- - $column[Type] = '*scpb.Column'
- - ColumnInIndex($index-column, $index, $table-id, $column-id, $index-id)
- - joinOnColumnID($index-column, $column, $table-id, $column-id)
- - descriptorIsNotBeingDropped-24.3($index-column)
- - transient($index-Target, $column-Target)
- - $index-Node[CurrentStatus] = TRANSIENT_ABSENT
- - $column-Node[CurrentStatus] = TRANSIENT_ABSENT
- - joinTargetNode($index, $index-Target, $index-Node)
- - joinTargetNode($column, $column-Target, $column-Node)
-- name: indexes containing column reach absent before column
- from: index-Node
- kind: Precedence
- to: column-Node
- query:
- - $index[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex']
- - $column[Type] = '*scpb.Column'
- - ColumnInIndex($index-column, $index, $table-id, $column-id, $index-id)
- - joinOnColumnID($index-column, $column, $table-id, $column-id)
- - descriptorIsNotBeingDropped-24.3($index-column)
- - $index-Target[TargetStatus] = TRANSIENT_ABSENT
- - $index-Node[CurrentStatus] = TRANSIENT_ABSENT
- - $column-Target[TargetStatus] = ABSENT
- - $column-Node[CurrentStatus] = ABSENT
- - joinTargetNode($index, $index-Target, $index-Node)
- - joinTargetNode($column, $column-Target, $column-Node)
-- name: indexes containing column reach absent before column
- from: index-Node
- kind: Precedence
- to: column-Node
- query:
- - $index[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex']
- - $column[Type] = '*scpb.Column'
- - ColumnInIndex($index-column, $index, $table-id, $column-id, $index-id)
- - joinOnColumnID($index-column, $column, $table-id, $column-id)
- - descriptorIsNotBeingDropped-24.3($index-column)
- - $index-Target[TargetStatus] = ABSENT
- - $index-Node[CurrentStatus] = ABSENT
- - $column-Target[TargetStatus] = TRANSIENT_ABSENT
- - $column-Node[CurrentStatus] = TRANSIENT_ABSENT
- - joinTargetNode($index, $index-Target, $index-Node)
- - joinTargetNode($column, $column-Target, $column-Node)
-- name: namespace exist before schema parent
- from: dependent-Node
- kind: Precedence
- to: relation-Node
- query:
- - $dependent[Type] = '*scpb.Namespace'
- - $relation[Type] = '*scpb.SchemaParent'
- - joinOnDescID($dependent, $relation, $schema-id)
- - ToPublicOrTransient($dependent-Target, $relation-Target)
- - $dependent-Node[CurrentStatus] = PUBLIC
- - $relation-Node[CurrentStatus] = PUBLIC
- - joinTargetNode($dependent, $dependent-Target, $dependent-Node)
- - joinTargetNode($relation, $relation-Target, $relation-Node)
-- name: non-data dependents removed before descriptor
- from: dependent-Node
- kind: Precedence
- to: descriptor-Node
- query:
- - $dependent[Type] IN ['*scpb.CheckConstraint', '*scpb.CheckConstraintUnvalidated', '*scpb.Column', '*scpb.ColumnComment', '*scpb.ColumnComputeExpression', '*scpb.ColumnDefaultExpression', '*scpb.ColumnFamily', '*scpb.ColumnName', '*scpb.ColumnNotNull', '*scpb.ColumnOnUpdateExpression', '*scpb.ColumnType', '*scpb.CompositeTypeAttrName', '*scpb.CompositeTypeAttrType', '*scpb.ConstraintComment', '*scpb.ConstraintWithoutIndexName', '*scpb.DatabaseComment', '*scpb.DatabaseRegionConfig', '*scpb.DatabaseRoleSetting', '*scpb.DatabaseZoneConfig', '*scpb.EnumTypeValue', '*scpb.ForeignKeyConstraint', '*scpb.ForeignKeyConstraintUnvalidated', '*scpb.FunctionBody', '*scpb.FunctionLeakProof', '*scpb.FunctionName', '*scpb.FunctionNullInputBehavior', '*scpb.FunctionSecurity', '*scpb.FunctionVolatility', '*scpb.IndexColumn', '*scpb.IndexComment', '*scpb.IndexName', '*scpb.IndexPartitioning', '*scpb.IndexZoneConfig', '*scpb.LDRJobIDs', '*scpb.Namespace', '*scpb.Owner', '*scpb.PartitionZoneConfig', '*scpb.PrimaryIndex', '*scpb.RowLevelTTL', '*scpb.SchemaChild', '*scpb.SchemaComment', '*scpb.SchemaParent', '*scpb.SecondaryIndex', '*scpb.SecondaryIndexPartial', '*scpb.SequenceOption', '*scpb.SequenceOwner', '*scpb.TableComment', '*scpb.TableLocalityGlobal', '*scpb.TableLocalityPrimaryRegion', '*scpb.TableLocalityRegionalByRow', '*scpb.TableLocalitySecondaryRegion', '*scpb.TablePartitioning', '*scpb.TableSchemaLocked', '*scpb.TableZoneConfig', '*scpb.TemporaryIndex', '*scpb.Trigger', '*scpb.TriggerDeps', '*scpb.TriggerEnabled', '*scpb.TriggerEvents', '*scpb.TriggerFunctionCall', '*scpb.TriggerName', '*scpb.TriggerTiming', '*scpb.TriggerTransition', '*scpb.TriggerWhen', '*scpb.TypeComment', '*scpb.UniqueWithoutIndexConstraint', '*scpb.UniqueWithoutIndexConstraintUnvalidated', '*scpb.UserPrivileges']
- - $descriptor[Type] IN ['*scpb.AliasType', '*scpb.CompositeType', '*scpb.Database', '*scpb.EnumType', '*scpb.Function', '*scpb.Schema', '*scpb.Sequence', '*scpb.Table', '*scpb.View']
- - joinOnDescID($dependent, $descriptor, $desc-id)
- - toAbsent($dependent-Target, $descriptor-Target)
- - $dependent-Node[CurrentStatus] = ABSENT
- - $descriptor-Node[CurrentStatus] = ABSENT
- - joinTargetNode($dependent, $dependent-Target, $dependent-Node)
- - joinTargetNode($descriptor, $descriptor-Target, $descriptor-Node)
-- name: old index absent before new index public when swapping with transient
- from: old-primary-index-Node
- kind: Precedence
- to: new-primary-index-Node
- query:
- - $old-primary-index[Type] = '*scpb.PrimaryIndex'
- - $transient-primary-index[Type] = '*scpb.PrimaryIndex'
- - $new-primary-index[Type] = '*scpb.PrimaryIndex'
- - joinOnDescID($old-primary-index, $transient-primary-index, $table-id)
- - $old-primary-index[IndexID] = $old-index-id
- - $transient-primary-index[SourceIndexID] = $old-index-id
- - joinOnDescID($transient-primary-index, $new-primary-index, $table-id)
- - $transient-primary-index[IndexID] = $transient-index-id
- - $new-primary-index[SourceIndexID] = $transient-index-id
- - $old-primary-index-Target[TargetStatus] = ABSENT
- - $old-primary-index-Node[CurrentStatus] = ABSENT
- - $new-primary-index-Target[TargetStatus] = PUBLIC
- - $new-primary-index-Node[CurrentStatus] = PUBLIC
- - joinTargetNode($old-primary-index, $old-primary-index-Target, $old-primary-index-Node)
- - joinTargetNode($new-primary-index, $new-primary-index-Target, $new-primary-index-Node)
-- name: partial predicate removed right before secondary index when not dropping relation
- from: partial-predicate-Node
- kind: SameStagePrecedence
- to: index-Node
- query:
- - $partial-predicate[Type] = '*scpb.SecondaryIndexPartial'
- - descriptorIsNotBeingDropped-24.3($partial-predicate)
- - $index[Type] = '*scpb.SecondaryIndex'
- - joinOnIndexID($partial-predicate, $index, $table-id, $index-id)
- - toAbsent($partial-predicate-Target, $index-Target)
- - $partial-predicate-Node[CurrentStatus] = ABSENT
- - $index-Node[CurrentStatus] = ABSENT
- - joinTargetNode($partial-predicate, $partial-predicate-Target, $partial-predicate-Node)
- - joinTargetNode($index, $index-Target, $index-Node)
-- name: partial predicate removed right before secondary index when not dropping relation
- from: partial-predicate-Node
- kind: SameStagePrecedence
- to: index-Node
- query:
- - $partial-predicate[Type] = '*scpb.SecondaryIndexPartial'
- - descriptorIsNotBeingDropped-24.3($partial-predicate)
- - $index[Type] = '*scpb.SecondaryIndex'
- - joinOnIndexID($partial-predicate, $index, $table-id, $index-id)
- - transient($partial-predicate-Target, $index-Target)
- - $partial-predicate-Node[CurrentStatus] = TRANSIENT_ABSENT
- - $index-Node[CurrentStatus] = TRANSIENT_ABSENT
- - joinTargetNode($partial-predicate, $partial-predicate-Target, $partial-predicate-Node)
- - joinTargetNode($index, $index-Target, $index-Node)
-- name: partial predicate removed right before secondary index when not dropping relation
- from: partial-predicate-Node
- kind: SameStagePrecedence
- to: index-Node
- query:
- - $partial-predicate[Type] = '*scpb.SecondaryIndexPartial'
- - descriptorIsNotBeingDropped-24.3($partial-predicate)
- - $index[Type] = '*scpb.SecondaryIndex'
- - joinOnIndexID($partial-predicate, $index, $table-id, $index-id)
- - $partial-predicate-Target[TargetStatus] = TRANSIENT_ABSENT
- - $partial-predicate-Node[CurrentStatus] = TRANSIENT_ABSENT
- - $index-Target[TargetStatus] = ABSENT
- - $index-Node[CurrentStatus] = ABSENT
- - joinTargetNode($partial-predicate, $partial-predicate-Target, $partial-predicate-Node)
- - joinTargetNode($index, $index-Target, $index-Node)
-- name: partial predicate removed right before secondary index when not dropping relation
- from: partial-predicate-Node
- kind: SameStagePrecedence
- to: index-Node
- query:
- - $partial-predicate[Type] = '*scpb.SecondaryIndexPartial'
- - descriptorIsNotBeingDropped-24.3($partial-predicate)
- - $index[Type] = '*scpb.SecondaryIndex'
- - joinOnIndexID($partial-predicate, $index, $table-id, $index-id)
- - $partial-predicate-Target[TargetStatus] = ABSENT
- - $partial-predicate-Node[CurrentStatus] = ABSENT
- - $index-Target[TargetStatus] = TRANSIENT_ABSENT
- - $index-Node[CurrentStatus] = TRANSIENT_ABSENT
- - joinTargetNode($partial-predicate, $partial-predicate-Target, $partial-predicate-Node)
- - joinTargetNode($index, $index-Target, $index-Node)
-- name: primary index named right before index becomes public
- from: index-name-Node
- kind: SameStagePrecedence
- to: index-Node
- query:
- - $index-name[Type] = '*scpb.IndexName'
- - $index[Type] = '*scpb.PrimaryIndex'
- - joinOnIndexID($index-name, $index, $table-id, $index-id)
- - ToPublicOrTransient($index-name-Target, $index-Target)
- - $index-name-Node[CurrentStatus] = PUBLIC
- - $index-Node[CurrentStatus] = PUBLIC
- - joinTargetNode($index-name, $index-name-Target, $index-name-Node)
- - joinTargetNode($index, $index-Target, $index-Node)
-- name: primary index swap
- from: old-index-Node
- kind: SameStagePrecedence
- to: new-index-Node
- query:
- - $old-index[Type] = '*scpb.PrimaryIndex'
- - $new-index[Type] = '*scpb.PrimaryIndex'
- - joinOnDescID($old-index, $new-index, $table-id)
- - $old-index[IndexID] = $old-index-id
- - $new-index[SourceIndexID] = $old-index-id
- - $old-index-Target[TargetStatus] = ABSENT
- - $old-index-Node[CurrentStatus] = VALIDATED
- - $new-index-Target[TargetStatus] IN [PUBLIC, TRANSIENT_ABSENT]
- - $new-index-Node[CurrentStatus] = PUBLIC
- - joinTargetNode($old-index, $old-index-Target, $old-index-Node)
- - joinTargetNode($new-index, $new-index-Target, $new-index-Node)
-- name: primary index swap
- from: old-index-Node
- kind: SameStagePrecedence
- to: new-index-Node
- query:
- - $old-index[Type] = '*scpb.PrimaryIndex'
- - $new-index[Type] = '*scpb.PrimaryIndex'
- - joinOnDescID($old-index, $new-index, $table-id)
- - $old-index[IndexID] = $old-index-id
- - $new-index[SourceIndexID] = $old-index-id
- - $old-index-Target[TargetStatus] = TRANSIENT_ABSENT
- - $old-index-Node[CurrentStatus] = TRANSIENT_VALIDATED
- - $new-index-Target[TargetStatus] IN [PUBLIC, TRANSIENT_ABSENT]
- - $new-index-Node[CurrentStatus] = PUBLIC
- - joinTargetNode($old-index, $old-index-Target, $old-index-Node)
- - joinTargetNode($new-index, $new-index-Target, $new-index-Node)
-- name: primary index swap
- from: new-index-Node
- kind: SameStagePrecedence
- to: old-index-Node
- query:
- - $new-index[Type] = '*scpb.PrimaryIndex'
- - $old-index[Type] = '*scpb.PrimaryIndex'
- - joinOnDescID($new-index, $old-index, $table-id)
- - $new-index[SourceIndexID] = $old-index-id
- - $old-index[IndexID] = $old-index-id
- - $new-index-Target[TargetStatus] = ABSENT
- - $new-index-Node[CurrentStatus] = VALIDATED
- - $old-index-Target[TargetStatus] = PUBLIC
- - $old-index-Node[CurrentStatus] = PUBLIC
- - joinTargetNode($new-index, $new-index-Target, $new-index-Node)
- - joinTargetNode($old-index, $old-index-Target, $old-index-Node)
-- name: primary index with new columns should exist before secondary indexes
- from: primary-index-Node
- kind: Precedence
- to: secondary-index-Node
- query:
- - $primary-index[Type] = '*scpb.PrimaryIndex'
- - $secondary-index[Type] = '*scpb.SecondaryIndex'
- - joinOnDescID($primary-index, $secondary-index, $table-id)
- - $primary-index[IndexID] = $primary-index-id
- - $secondary-index[SourceIndexID] = $primary-index-id
- - ToPublicOrTransient($primary-index-Target, $secondary-index-Target)
- - $primary-index-Node[CurrentStatus] = PUBLIC
- - $secondary-index-Node[CurrentStatus] = BACKFILL_ONLY
- - joinTargetNode($primary-index, $primary-index-Target, $primary-index-Node)
- - joinTargetNode($secondary-index, $secondary-index-Target, $secondary-index-Node)
-- name: primary index with new columns should exist before temp indexes
- from: primary-index-Node
- kind: Precedence
- to: temp-index-Node
- query:
- - $primary-index[Type] = '*scpb.PrimaryIndex'
- - $temp-index[Type] = '*scpb.TemporaryIndex'
- - joinOnDescID($primary-index, $temp-index, $table-id)
- - $primary-index[IndexID] = $primary-index-id
- - $temp-index[SourceIndexID] = $primary-index-id
- - ToPublicOrTransient($primary-index-Target, $temp-index-Target)
- - $primary-index-Node[CurrentStatus] = PUBLIC
- - $temp-index-Node[CurrentStatus] = DELETE_ONLY
- - joinTargetNode($primary-index, $primary-index-Target, $primary-index-Node)
- - joinTargetNode($temp-index, $temp-index-Target, $temp-index-Node)
-- name: relation dropped before dependent column
- from: descriptor-Node
- kind: Precedence
- to: column-Node
- query:
- - $descriptor[Type] IN ['*scpb.Table', '*scpb.View', '*scpb.Sequence']
- - $column[Type] = '*scpb.Column'
- - joinOnDescID($descriptor, $column, $desc-id)
- - toAbsent($descriptor-Target, $column-Target)
- - $descriptor-Node[CurrentStatus] = DROPPED
- - $column-Node[CurrentStatus] = WRITE_ONLY
- - joinTargetNode($descriptor, $descriptor-Target, $descriptor-Node)
- - joinTargetNode($column, $column-Target, $column-Node)
-- name: relation dropped before dependent constraint
- from: descriptor-Node
- kind: Precedence
- to: constraint-Node
- query:
- - $descriptor[Type] = '*scpb.Table'
- - $constraint[Type] = '*scpb.ColumnNotNull'
- - joinOnDescID($descriptor, $constraint, $desc-id)
- - toAbsent($descriptor-Target, $constraint-Target)
- - $descriptor-Node[CurrentStatus] = DROPPED
- - $constraint-Node[CurrentStatus] = VALIDATED
- - joinTargetNode($descriptor, $descriptor-Target, $descriptor-Node)
- - joinTargetNode($constraint, $constraint-Target, $constraint-Node)
-- name: relation dropped before dependent index
- from: descriptor-Node
- kind: Precedence
- to: index-Node
- query:
- - $descriptor[Type] IN ['*scpb.Table', '*scpb.View']
- - $index[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex', '*scpb.TemporaryIndex']
- - joinOnDescID($descriptor, $index, $desc-id)
- - toAbsent($descriptor-Target, $index-Target)
- - $descriptor-Node[CurrentStatus] = DROPPED
- - $index-Node[CurrentStatus] = VALIDATED
- - joinTargetNode($descriptor, $descriptor-Target, $descriptor-Node)
- - joinTargetNode($index, $index-Target, $index-Node)
-- name: remove columns from index right before removing index
- from: index-column-Node
- kind: Precedence
- to: index-Node
- query:
- - $index-column[Type] = '*scpb.IndexColumn'
- - $index[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex', '*scpb.TemporaryIndex']
- - joinOnIndexID($index-column, $index, $table-id, $index-id)
- - toAbsent($index-column-Target, $index-Target)
- - $index-column-Node[CurrentStatus] = DELETE_ONLY
- - $index-Node[CurrentStatus] = ABSENT
- - joinTargetNode($index-column, $index-column-Target, $index-column-Node)
- - joinTargetNode($index, $index-Target, $index-Node)
-- name: remove columns from index right before removing index
- from: index-column-Node
- kind: Precedence
- to: index-Node
- query:
- - $index-column[Type] = '*scpb.IndexColumn'
- - $index[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex', '*scpb.TemporaryIndex']
- - joinOnIndexID($index-column, $index, $table-id, $index-id)
- - transient($index-column-Target, $index-Target)
- - $index-column-Node[CurrentStatus] = TRANSIENT_DELETE_ONLY
- - $index-Node[CurrentStatus] = TRANSIENT_ABSENT
- - joinTargetNode($index-column, $index-column-Target, $index-column-Node)
- - joinTargetNode($index, $index-Target, $index-Node)
-- name: remove columns from index right before removing index
- from: index-column-Node
- kind: Precedence
- to: index-Node
- query:
- - $index-column[Type] = '*scpb.IndexColumn'
- - $index[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex', '*scpb.TemporaryIndex']
- - joinOnIndexID($index-column, $index, $table-id, $index-id)
- - $index-column-Target[TargetStatus] = TRANSIENT_ABSENT
- - $index-column-Node[CurrentStatus] = TRANSIENT_DELETE_ONLY
- - $index-Target[TargetStatus] = ABSENT
- - $index-Node[CurrentStatus] = ABSENT
- - joinTargetNode($index-column, $index-column-Target, $index-column-Node)
- - joinTargetNode($index, $index-Target, $index-Node)
-- name: remove columns from index right before removing index
- from: index-column-Node
- kind: Precedence
- to: index-Node
- query:
- - $index-column[Type] = '*scpb.IndexColumn'
- - $index[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex', '*scpb.TemporaryIndex']
- - joinOnIndexID($index-column, $index, $table-id, $index-id)
- - $index-column-Target[TargetStatus] = ABSENT
- - $index-column-Node[CurrentStatus] = DELETE_ONLY
- - $index-Target[TargetStatus] = TRANSIENT_ABSENT
- - $index-Node[CurrentStatus] = TRANSIENT_ABSENT
- - joinTargetNode($index-column, $index-column-Target, $index-column-Node)
- - joinTargetNode($index, $index-Target, $index-Node)
-- name: replacement secondary index should be validated before the old one becomes invisible
- from: new-index-Node
- kind: Precedence
- to: old-index-Node
- query:
- - $old-index[Type] = '*scpb.SecondaryIndex'
- - $new-index[Type] = '*scpb.SecondaryIndex'
- - $old-index-Target[TargetStatus] = ABSENT
- - $new-index-Target[TargetStatus] IN [PUBLIC, TRANSIENT_ABSENT]
- - joinOnDescID($old-index, $new-index, $table-id)
- - $new-index[IndexID] = $index-id
- - $old-index[IndexID] = $old-index-id
- - $new-index[RecreateSourceIndexID] = $old-index-id
- - joinTargetNode($old-index, $old-index-Target, $old-index-Node)
- - joinTargetNode($new-index, $new-index-Target, $new-index-Node)
- - $new-index-Node[CurrentStatus] = PUBLIC
- - $old-index-Node[CurrentStatus] = VALIDATED
- - joinTargetNode($new-index, $new-index-Target, $new-index-Node)
- - joinTargetNode($old-index, $old-index-Target, $old-index-Node)
-- name: schedule all GC jobs for a descriptor in the same stage
- from: data-a-Node
- kind: SameStagePrecedence
- to: data-b-Node
- query:
- - $data-a[Type] IN ['*scpb.DatabaseData', '*scpb.IndexData', '*scpb.TableData']
- - $data-b[Type] IN ['*scpb.DatabaseData', '*scpb.IndexData', '*scpb.TableData']
- - joinOnDescID($data-a, $data-b, $desc-id)
- - SmallerIDsFirst(scpb.Element, scpb.Element)($data-a, $data-b)
- - toAbsent($data-a-Target, $data-b-Target)
- - $data-a-Node[CurrentStatus] = DROPPED
- - $data-b-Node[CurrentStatus] = DROPPED
- - joinTargetNode($data-a, $data-a-Target, $data-a-Node)
- - joinTargetNode($data-b, $data-b-Target, $data-b-Node)
-- name: schedule all GC jobs for a descriptor in the same stage
- from: data-a-Node
- kind: SameStagePrecedence
- to: data-b-Node
- query:
- - $data-a[Type] IN ['*scpb.DatabaseData', '*scpb.IndexData', '*scpb.TableData']
- - $data-b[Type] IN ['*scpb.DatabaseData', '*scpb.IndexData', '*scpb.TableData']
- - joinOnDescID($data-a, $data-b, $desc-id)
- - SmallerIDsFirst(scpb.Element, scpb.Element)($data-a, $data-b)
- - transient($data-a-Target, $data-b-Target)
- - $data-a-Node[CurrentStatus] = TRANSIENT_DROPPED
- - $data-b-Node[CurrentStatus] = TRANSIENT_DROPPED
- - joinTargetNode($data-a, $data-a-Target, $data-a-Node)
- - joinTargetNode($data-b, $data-b-Target, $data-b-Node)
-- name: schedule all GC jobs for a descriptor in the same stage
- from: data-a-Node
- kind: SameStagePrecedence
- to: data-b-Node
- query:
- - $data-a[Type] IN ['*scpb.DatabaseData', '*scpb.IndexData', '*scpb.TableData']
- - $data-b[Type] IN ['*scpb.DatabaseData', '*scpb.IndexData', '*scpb.TableData']
- - joinOnDescID($data-a, $data-b, $desc-id)
- - SmallerIDsFirst(scpb.Element, scpb.Element)($data-a, $data-b)
- - $data-a-Target[TargetStatus] = TRANSIENT_ABSENT
- - $data-a-Node[CurrentStatus] = TRANSIENT_DROPPED
- - $data-b-Target[TargetStatus] = ABSENT
- - $data-b-Node[CurrentStatus] = DROPPED
- - joinTargetNode($data-a, $data-a-Target, $data-a-Node)
- - joinTargetNode($data-b, $data-b-Target, $data-b-Node)
-- name: schedule all GC jobs for a descriptor in the same stage
- from: data-a-Node
- kind: SameStagePrecedence
- to: data-b-Node
- query:
- - $data-a[Type] IN ['*scpb.DatabaseData', '*scpb.IndexData', '*scpb.TableData']
- - $data-b[Type] IN ['*scpb.DatabaseData', '*scpb.IndexData', '*scpb.TableData']
- - joinOnDescID($data-a, $data-b, $desc-id)
- - SmallerIDsFirst(scpb.Element, scpb.Element)($data-a, $data-b)
- - $data-a-Target[TargetStatus] = ABSENT
- - $data-a-Node[CurrentStatus] = DROPPED
- - $data-b-Target[TargetStatus] = TRANSIENT_ABSENT
- - $data-b-Node[CurrentStatus] = TRANSIENT_DROPPED
- - joinTargetNode($data-a, $data-a-Target, $data-a-Node)
- - joinTargetNode($data-b, $data-b-Target, $data-b-Node)
-- name: secondary index named before public (with index swap)
- from: index-Node
- kind: Precedence
- to: index-name-Node
- query:
- - $index-name[Type] = '*scpb.IndexName'
- - $index[Type] = '*scpb.SecondaryIndex'
- - joinOnIndexID($index, $index-name, $table-id, $index-id)
- - ToPublicOrTransient($index-Target, $index-name-Target)
- - $index-Node[CurrentStatus] = VALIDATED
- - $index-name-Node[CurrentStatus] = PUBLIC
- - $old-index[Type] = '*scpb.SecondaryIndex'
- - $new-index[Type] = '*scpb.SecondaryIndex'
- - $old-index-Target[TargetStatus] = ABSENT
- - $new-index-Target[TargetStatus] IN [PUBLIC, TRANSIENT_ABSENT]
- - joinOnDescID($old-index, $new-index, $table-id)
- - $new-index[IndexID] = $index-id
- - $old-index[IndexID] = $old-index-id
- - $new-index[RecreateSourceIndexID] = $old-index-id
- - joinTargetNode($old-index, $old-index-Target, $old-index-Node)
- - joinTargetNode($new-index, $new-index-Target, $new-index-Node)
- - joinTargetNode($index, $index-Target, $index-Node)
- - joinTargetNode($index-name, $index-name-Target, $index-name-Node)
-- name: secondary index named before validation (without index swap)
- from: index-name-Node
- kind: Precedence
- to: index-Node
- query:
- - $index-name[Type] = '*scpb.IndexName'
- - $index[Type] = '*scpb.SecondaryIndex'
- - joinOnIndexID($index-name, $index, $table-id, $index-id)
- - no secondary index swap is on going($table-id, $index-id)
- - ToPublicOrTransient($index-name-Target, $index-Target)
- - $index-name-Node[CurrentStatus] = PUBLIC
- - $index-Node[CurrentStatus] = VALIDATED
- - joinTargetNode($index-name, $index-name-Target, $index-name-Node)
- - joinTargetNode($index, $index-Target, $index-Node)
-- name: secondary index partial no longer public before referenced column
- from: secondary-partial-index-Node
- kind: Precedence
- to: column-Node
- query:
- - $secondary-partial-index[Type] = '*scpb.SecondaryIndex'
- - $column[Type] = '*scpb.Column'
- - joinOnDescID($secondary-partial-index, $column, $table-id)
- - descriptorIsNotBeingDropped-24.3($secondary-partial-index)
- - secondaryIndexReferencesColumn(*scpb.SecondaryIndex, *scpb.Column)($secondary-partial-index, $column)
- - toAbsent($secondary-partial-index-Target, $column-Target)
- - $secondary-partial-index-Node[CurrentStatus] = DELETE_ONLY
- - $column-Node[CurrentStatus] = WRITE_ONLY
- - joinTargetNode($secondary-partial-index, $secondary-partial-index-Target, $secondary-partial-index-Node)
- - joinTargetNode($column, $column-Target, $column-Node)
-- name: secondary index partial no longer public before referenced column
- from: secondary-partial-index-Node
- kind: Precedence
- to: column-Node
- query:
- - $secondary-partial-index[Type] = '*scpb.SecondaryIndex'
- - $column[Type] = '*scpb.Column'
- - joinOnDescID($secondary-partial-index, $column, $table-id)
- - descriptorIsNotBeingDropped-24.3($secondary-partial-index)
- - secondaryIndexReferencesColumn(*scpb.SecondaryIndex, *scpb.Column)($secondary-partial-index, $column)
- - transient($secondary-partial-index-Target, $column-Target)
- - $secondary-partial-index-Node[CurrentStatus] = TRANSIENT_DELETE_ONLY
- - $column-Node[CurrentStatus] = TRANSIENT_WRITE_ONLY
- - joinTargetNode($secondary-partial-index, $secondary-partial-index-Target, $secondary-partial-index-Node)
- - joinTargetNode($column, $column-Target, $column-Node)
-- name: secondary index partial no longer public before referenced column
- from: secondary-partial-index-Node
- kind: Precedence
- to: column-Node
- query:
- - $secondary-partial-index[Type] = '*scpb.SecondaryIndex'
- - $column[Type] = '*scpb.Column'
- - joinOnDescID($secondary-partial-index, $column, $table-id)
- - descriptorIsNotBeingDropped-24.3($secondary-partial-index)
- - secondaryIndexReferencesColumn(*scpb.SecondaryIndex, *scpb.Column)($secondary-partial-index, $column)
- - $secondary-partial-index-Target[TargetStatus] = TRANSIENT_ABSENT
- - $secondary-partial-index-Node[CurrentStatus] = TRANSIENT_DELETE_ONLY
- - $column-Target[TargetStatus] = ABSENT
- - $column-Node[CurrentStatus] = WRITE_ONLY
- - joinTargetNode($secondary-partial-index, $secondary-partial-index-Target, $secondary-partial-index-Node)
- - joinTargetNode($column, $column-Target, $column-Node)
-- name: secondary index partial no longer public before referenced column
- from: secondary-partial-index-Node
- kind: Precedence
- to: column-Node
- query:
- - $secondary-partial-index[Type] = '*scpb.SecondaryIndex'
- - $column[Type] = '*scpb.Column'
- - joinOnDescID($secondary-partial-index, $column, $table-id)
- - descriptorIsNotBeingDropped-24.3($secondary-partial-index)
- - secondaryIndexReferencesColumn(*scpb.SecondaryIndex, *scpb.Column)($secondary-partial-index, $column)
- - $secondary-partial-index-Target[TargetStatus] = ABSENT
- - $secondary-partial-index-Node[CurrentStatus] = DELETE_ONLY
- - $column-Target[TargetStatus] = TRANSIENT_ABSENT
- - $column-Node[CurrentStatus] = TRANSIENT_WRITE_ONLY
- - joinTargetNode($secondary-partial-index, $secondary-partial-index-Target, $secondary-partial-index-Node)
- - joinTargetNode($column, $column-Target, $column-Node)
-- name: secondary index partial no longer public before referenced column
- from: secondary-partial-index-Node
- kind: Precedence
- to: column-Node
- query:
- - $secondary-partial-index[Type] = '*scpb.SecondaryIndexPartial'
- - $column[Type] = '*scpb.Column'
- - joinOnDescID($secondary-partial-index, $column, $table-id)
- - descriptorIsNotBeingDropped-24.3($secondary-partial-index)
- - secondaryIndexReferencesColumn(*scpb.SecondaryIndexPartial, *scpb.Column)($secondary-partial-index, $column)
- - toAbsent($secondary-partial-index-Target, $column-Target)
- - $secondary-partial-index-Node[CurrentStatus] = ABSENT
- - $column-Node[CurrentStatus] = WRITE_ONLY
- - joinTargetNode($secondary-partial-index, $secondary-partial-index-Target, $secondary-partial-index-Node)
- - joinTargetNode($column, $column-Target, $column-Node)
-- name: secondary index partial no longer public before referenced column
- from: secondary-partial-index-Node
- kind: Precedence
- to: column-Node
- query:
- - $secondary-partial-index[Type] = '*scpb.SecondaryIndexPartial'
- - $column[Type] = '*scpb.Column'
- - joinOnDescID($secondary-partial-index, $column, $table-id)
- - descriptorIsNotBeingDropped-24.3($secondary-partial-index)
- - secondaryIndexReferencesColumn(*scpb.SecondaryIndexPartial, *scpb.Column)($secondary-partial-index, $column)
- - transient($secondary-partial-index-Target, $column-Target)
- - $secondary-partial-index-Node[CurrentStatus] = TRANSIENT_ABSENT
- - $column-Node[CurrentStatus] = TRANSIENT_WRITE_ONLY
- - joinTargetNode($secondary-partial-index, $secondary-partial-index-Target, $secondary-partial-index-Node)
- - joinTargetNode($column, $column-Target, $column-Node)
-- name: secondary index partial no longer public before referenced column
- from: secondary-partial-index-Node
- kind: Precedence
- to: column-Node
- query:
- - $secondary-partial-index[Type] = '*scpb.SecondaryIndexPartial'
- - $column[Type] = '*scpb.Column'
- - joinOnDescID($secondary-partial-index, $column, $table-id)
- - descriptorIsNotBeingDropped-24.3($secondary-partial-index)
- - secondaryIndexReferencesColumn(*scpb.SecondaryIndexPartial, *scpb.Column)($secondary-partial-index, $column)
- - $secondary-partial-index-Target[TargetStatus] = TRANSIENT_ABSENT
- - $secondary-partial-index-Node[CurrentStatus] = TRANSIENT_ABSENT
- - $column-Target[TargetStatus] = ABSENT
- - $column-Node[CurrentStatus] = WRITE_ONLY
- - joinTargetNode($secondary-partial-index, $secondary-partial-index-Target, $secondary-partial-index-Node)
- - joinTargetNode($column, $column-Target, $column-Node)
-- name: secondary index partial no longer public before referenced column
- from: secondary-partial-index-Node
- kind: Precedence
- to: column-Node
- query:
- - $secondary-partial-index[Type] = '*scpb.SecondaryIndexPartial'
- - $column[Type] = '*scpb.Column'
- - joinOnDescID($secondary-partial-index, $column, $table-id)
- - descriptorIsNotBeingDropped-24.3($secondary-partial-index)
- - secondaryIndexReferencesColumn(*scpb.SecondaryIndexPartial, *scpb.Column)($secondary-partial-index, $column)
- - $secondary-partial-index-Target[TargetStatus] = ABSENT
- - $secondary-partial-index-Node[CurrentStatus] = ABSENT
- - $column-Target[TargetStatus] = TRANSIENT_ABSENT
- - $column-Node[CurrentStatus] = TRANSIENT_WRITE_ONLY
- - joinTargetNode($secondary-partial-index, $secondary-partial-index-Target, $secondary-partial-index-Node)
- - joinTargetNode($column, $column-Target, $column-Node)
-- name: secondary index should be validated before dependent view can be absent
- from: index-Node
- kind: Precedence
- to: view-Node
- query:
- - $index[Type] = '*scpb.SecondaryIndex'
- - $view[Type] = '*scpb.View'
- - viewReferencesIndex(*scpb.SecondaryIndex, *scpb.View)($index, $view)
- - toAbsent($index-Target, $view-Target)
- - $index-Node[CurrentStatus] = VALIDATED
- - $view-Node[CurrentStatus] = ABSENT
- - joinTargetNode($index, $index-Target, $index-Node)
- - joinTargetNode($view, $view-Target, $view-Node)
-- name: secondary index should be validated before dependent view can be absent
- from: index-Node
- kind: Precedence
- to: view-Node
- query:
- - $index[Type] = '*scpb.SecondaryIndex'
- - $view[Type] = '*scpb.View'
- - viewReferencesIndex(*scpb.SecondaryIndex, *scpb.View)($index, $view)
- - transient($index-Target, $view-Target)
- - $index-Node[CurrentStatus] = TRANSIENT_VALIDATED
- - $view-Node[CurrentStatus] = TRANSIENT_ABSENT
- - joinTargetNode($index, $index-Target, $index-Node)
- - joinTargetNode($view, $view-Target, $view-Node)
-- name: secondary index should be validated before dependent view can be absent
- from: index-Node
- kind: Precedence
- to: view-Node
- query:
- - $index[Type] = '*scpb.SecondaryIndex'
- - $view[Type] = '*scpb.View'
- - viewReferencesIndex(*scpb.SecondaryIndex, *scpb.View)($index, $view)
- - $index-Target[TargetStatus] = TRANSIENT_ABSENT
- - $index-Node[CurrentStatus] = TRANSIENT_VALIDATED
- - $view-Target[TargetStatus] = ABSENT
- - $view-Node[CurrentStatus] = ABSENT
- - joinTargetNode($index, $index-Target, $index-Node)
- - joinTargetNode($view, $view-Target, $view-Node)
-- name: secondary index should be validated before dependent view can be absent
- from: index-Node
- kind: Precedence
- to: view-Node
- query:
- - $index[Type] = '*scpb.SecondaryIndex'
- - $view[Type] = '*scpb.View'
- - viewReferencesIndex(*scpb.SecondaryIndex, *scpb.View)($index, $view)
- - $index-Target[TargetStatus] = ABSENT
- - $index-Node[CurrentStatus] = VALIDATED
- - $view-Target[TargetStatus] = TRANSIENT_ABSENT
- - $view-Node[CurrentStatus] = TRANSIENT_ABSENT
- - joinTargetNode($index, $index-Target, $index-Node)
- - joinTargetNode($view, $view-Target, $view-Node)
-- name: secondary indexes containing column as key reach write-only before column
- from: index-Node
- kind: Precedence
- to: column-Node
- query:
- - $index[Type] = '*scpb.SecondaryIndex'
- - $column[Type] = '*scpb.Column'
- - ColumnInIndex($index-column, $index, $table-id, $column-id, $index-id)
- - joinOnColumnID($index-column, $column, $table-id, $column-id)
- - toAbsent($index-Target, $column-Target)
- - $index-Node[CurrentStatus] = VALIDATED
- - $column-Node[CurrentStatus] = WRITE_ONLY
- - descriptorIsNotBeingDropped-24.3($index-column)
- - isIndexKeyColumnKey(*scpb.IndexColumn)($index-column)
- - joinTargetNode($index, $index-Target, $index-Node)
- - joinTargetNode($column, $column-Target, $column-Node)
-- name: simple constraint public right before its dependents
- from: simple-constraint-Node
- kind: SameStagePrecedence
- to: dependent-Node
- query:
- - $simple-constraint[Type] = '*scpb.ColumnNotNull'
- - $dependent[Type] IN ['*scpb.ConstraintComment', '*scpb.ConstraintWithoutIndexName']
- - joinOnConstraintID($simple-constraint, $dependent, $table-id, $constraint-id)
- - ToPublicOrTransient($simple-constraint-Target, $dependent-Target)
- - $simple-constraint-Node[CurrentStatus] = PUBLIC
- - $dependent-Node[CurrentStatus] = PUBLIC
- - joinTargetNode($simple-constraint, $simple-constraint-Target, $simple-constraint-Node)
- - joinTargetNode($dependent, $dependent-Target, $dependent-Node)
-- name: simple constraint visible before name
- from: simple-constraint-Node
- kind: Precedence
- to: constraint-name-Node
- query:
- - $simple-constraint[Type] IN ['*scpb.CheckConstraint', '*scpb.CheckConstraintUnvalidated', '*scpb.ColumnNotNull', '*scpb.ForeignKeyConstraint', '*scpb.ForeignKeyConstraintUnvalidated', '*scpb.UniqueWithoutIndexConstraint', '*scpb.UniqueWithoutIndexConstraintUnvalidated']
- - $constraint-name[Type] = '*scpb.ConstraintWithoutIndexName'
- - joinOnConstraintID($simple-constraint, $constraint-name, $table-id, $constraint-id)
- - ToPublicOrTransient($simple-constraint-Target, $constraint-name-Target)
- - $simple-constraint-Node[CurrentStatus] = WRITE_ONLY
- - $constraint-name-Node[CurrentStatus] = PUBLIC
- - joinTargetNode($simple-constraint, $simple-constraint-Target, $simple-constraint-Node)
- - joinTargetNode($constraint-name, $constraint-name-Target, $constraint-name-Node)
-- name: swapped primary index public before column
- from: index-Node
- kind: Precedence
- to: column-Node
- query:
- - $index[Type] = '*scpb.PrimaryIndex'
- - $column[Type] = '*scpb.Column'
- - ColumnInSwappedInPrimaryIndex($index-column, $index, $table-id, $column-id, $index-id)
- - joinOnColumnID($index-column, $column, $table-id, $column-id)
- - ToPublicOrTransient($index-Target, $column-Target)
- - $index-Node[CurrentStatus] = PUBLIC
- - $column-Node[CurrentStatus] = PUBLIC
- - joinTargetNode($index, $index-Target, $index-Node)
- - joinTargetNode($column, $column-Target, $column-Node)
-- name: table added right before data element
- from: table-Node
- kind: Precedence
- to: data-Node
- query:
- - $table[Type] IN ['*scpb.AliasType', '*scpb.CompositeType', '*scpb.Database', '*scpb.EnumType', '*scpb.Function', '*scpb.Schema', '*scpb.Sequence', '*scpb.Table', '*scpb.View']
- - $data[Type] IN ['*scpb.DatabaseData', '*scpb.IndexData', '*scpb.TableData']
- - joinOnDescID($table, $data, $table-id)
- - ToPublicOrTransient($table-Target, $data-Target)
- - $table-Node[CurrentStatus] = PUBLIC
- - $data-Node[CurrentStatus] = PUBLIC
- - joinTargetNode($table, $table-Target, $table-Node)
- - joinTargetNode($data, $data-Target, $data-Node)
-- name: table removed right before garbage collection
- from: table-Node
- kind: SameStagePrecedence
- to: data-Node
- query:
- - $table[Type] IN ['*scpb.AliasType', '*scpb.CompositeType', '*scpb.Database', '*scpb.EnumType', '*scpb.Function', '*scpb.Schema', '*scpb.Sequence', '*scpb.Table', '*scpb.View']
- - $data[Type] = '*scpb.TableData'
- - joinOnDescID($table, $data, $table-id)
- - toAbsent($table-Target, $data-Target)
- - $table-Node[CurrentStatus] = ABSENT
- - $data-Node[CurrentStatus] = DROPPED
- - joinTargetNode($table, $table-Target, $table-Node)
- - joinTargetNode($data, $data-Target, $data-Node)
-- name: temp index data exists as soon as temp index accepts writes
- from: temp-index-Node
- kind: SameStagePrecedence
- to: temp-index-data-Node
- query:
- - $temp-index[Type] = '*scpb.TemporaryIndex'
- - $temp-index-data[Type] = '*scpb.IndexData'
- - joinOnIndexID($temp-index, $temp-index-data, $table-id, $index-id)
- - ToPublicOrTransient($temp-index-Target, $temp-index-data-Target)
- - $temp-index-Node[CurrentStatus] = WRITE_ONLY
- - $temp-index-data-Node[CurrentStatus] = PUBLIC
- - joinTargetNode($temp-index, $temp-index-Target, $temp-index-Node)
- - joinTargetNode($temp-index-data, $temp-index-data-Target, $temp-index-data-Node)
-- name: temp index disappeared before its master index reaches WRITE_ONLY
- from: temp-Node
- kind: Precedence
- to: index-Node
- query:
- - $temp[Type] = '*scpb.TemporaryIndex'
- - $index[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex']
- - joinOnDescID($temp, $index, $table-id)
- - $temp[IndexID] = $temp-index-id
- - $index[TemporaryIndexID] = $temp-index-id
- - $temp-Target[TargetStatus] = TRANSIENT_ABSENT
- - $temp-Node[CurrentStatus] = TRANSIENT_DELETE_ONLY
- - $index-Target[TargetStatus] IN [PUBLIC, TRANSIENT_ABSENT]
- - $index-Node[CurrentStatus] = WRITE_ONLY
- - joinTargetNode($temp, $temp-Target, $temp-Node)
- - joinTargetNode($index, $index-Target, $index-Node)
-- name: temp index existence precedes index dependents
- from: index-Node
- kind: Precedence
- to: dependent-Node
- query:
- - $index[Type] = '*scpb.TemporaryIndex'
- - $dependent[Type] IN ['*scpb.IndexColumn', '*scpb.IndexComment', '*scpb.IndexName', '*scpb.IndexPartitioning', '*scpb.IndexZoneConfig', '*scpb.PartitionZoneConfig', '*scpb.SecondaryIndexPartial']
- - joinOnIndexID($index, $dependent, $table-id, $index-id)
- - ToPublicOrTransient($index-Target, $dependent-Target)
- - $index-Node[CurrentStatus] = DELETE_ONLY
- - $dependent-Node[CurrentStatus] = PUBLIC
- - joinTargetNode($index, $index-Target, $index-Node)
- - joinTargetNode($dependent, $dependent-Target, $dependent-Node)
-- name: temp index is WRITE_ONLY before backfill
- from: temp-Node
- kind: Precedence
- to: index-Node
- query:
- - $temp[Type] = '*scpb.TemporaryIndex'
- - $index[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex']
- - joinOnDescID($temp, $index, $table-id)
- - $temp[IndexID] = $temp-index-id
- - $index[TemporaryIndexID] = $temp-index-id
- - $temp-Target[TargetStatus] = TRANSIENT_ABSENT
- - $index-Target[TargetStatus] IN [PUBLIC, TRANSIENT_ABSENT]
- - $temp-Node[CurrentStatus] = WRITE_ONLY
- - $index-Node[CurrentStatus] = BACKFILLED
- - joinTargetNode($temp, $temp-Target, $temp-Node)
- - joinTargetNode($index, $index-Target, $index-Node)
diff --git a/pkg/sql/schemachanger/scplan/plan.go b/pkg/sql/schemachanger/scplan/plan.go
index f80d56d5b838..f1f0e2e7d974 100644
--- a/pkg/sql/schemachanger/scplan/plan.go
+++ b/pkg/sql/schemachanger/scplan/plan.go
@@ -18,7 +18,6 @@ import (
"github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/internal/rules/current"
"github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/internal/rules/release_24_1"
"github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/internal/rules/release_24_2"
- "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/internal/rules/release_24_3"
"github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/internal/scgraph"
"github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/internal/scstage"
"github.com/cockroachdb/cockroach/pkg/util/log"
@@ -156,7 +155,6 @@ type rulesForRelease struct {
// with the newest supported version first.
var rulesForReleases = []rulesForRelease{
{activeVersion: clusterversion.Latest, rulesRegistry: current.GetRegistry()},
- {activeVersion: clusterversion.V24_3, rulesRegistry: release_24_3.GetRegistry()},
{activeVersion: clusterversion.V24_2, rulesRegistry: release_24_2.GetRegistry()},
{activeVersion: clusterversion.V24_1, rulesRegistry: release_24_1.GetRegistry()},
}
diff --git a/pkg/sql/schemachanger/scplan/testdata/alter_table_alter_column_type b/pkg/sql/schemachanger/scplan/testdata/alter_table_alter_column_type
deleted file mode 100644
index 59f9f2ec40fe..000000000000
--- a/pkg/sql/schemachanger/scplan/testdata/alter_table_alter_column_type
+++ /dev/null
@@ -1,840 +0,0 @@
-setup
-CREATE TABLE defaultdb.act (
- k INT PRIMARY KEY,
- c1 INT4
-);
-SET enable_experimental_alter_column_type_general=TRUE;
-----
-
-ops
-ALTER TABLE defaultdb.act ALTER COLUMN c1 SET DATA TYPE BIGINT;
-----
-StatementPhase stage 1 of 1 with 1 MutationType op
- transitions:
- [[ColumnType:{DescID: 104, ColumnFamilyID: 0, ColumnID: 2, TypeName: INT4}, ABSENT], PUBLIC] -> ABSENT
- [[ColumnType:{DescID: 104, ColumnFamilyID: 0, ColumnID: 2, TypeName: INT8}, PUBLIC], ABSENT] -> PUBLIC
- ops:
- *scop.UpsertColumnType
- ColumnType:
- ColumnID: 2
- ElementCreationMetadata:
- in231OrLater: true
- in243OrLater: true
- IsNullable: true
- TableID: 104
- TypeT:
- Type:
- family: IntFamily
- oid: 20
- width: 64
- TypeName: INT8
-PreCommitPhase stage 1 of 2 with 1 MutationType op
- transitions:
- [[ColumnType:{DescID: 104, ColumnFamilyID: 0, ColumnID: 2, TypeName: INT4}, ABSENT], ABSENT] -> PUBLIC
- [[ColumnType:{DescID: 104, ColumnFamilyID: 0, ColumnID: 2, TypeName: INT8}, PUBLIC], PUBLIC] -> ABSENT
- ops:
- *scop.UndoAllInTxnImmediateMutationOpSideEffects
- {}
-PreCommitPhase stage 2 of 2 with 1 MutationType op
- transitions:
- [[ColumnType:{DescID: 104, ColumnFamilyID: 0, ColumnID: 2, TypeName: INT4}, ABSENT], PUBLIC] -> ABSENT
- [[ColumnType:{DescID: 104, ColumnFamilyID: 0, ColumnID: 2, TypeName: INT8}, PUBLIC], ABSENT] -> PUBLIC
- ops:
- *scop.UpsertColumnType
- ColumnType:
- ColumnID: 2
- ElementCreationMetadata:
- in231OrLater: true
- in243OrLater: true
- IsNullable: true
- TableID: 104
- TypeT:
- Type:
- family: IntFamily
- oid: 20
- width: 64
- TypeName: INT8
-
-ops
-ALTER TABLE defaultdb.act ALTER COLUMN c1 SET DATA TYPE SMALLINT;
-----
-StatementPhase stage 1 of 1 with 1 MutationType op
- transitions:
- [[ColumnType:{DescID: 104, ColumnFamilyID: 0, ColumnID: 2, TypeName: INT4}, ABSENT], PUBLIC] -> ABSENT
- [[CheckConstraint:{DescID: 104, IndexID: 1, ConstraintID: 2, ReferencedColumnIDs: [2]}, TRANSIENT_ABSENT], ABSENT] -> WRITE_ONLY
- ops:
- *scop.AddCheckConstraint
- CheckExpr: (CAST(CAST(c1 AS INT2) AS INT4) = c1)
- ColumnIDs:
- - 2
- ConstraintID: 2
- TableID: 104
- Validity: 2
-PreCommitPhase stage 1 of 2 with 1 MutationType op
- transitions:
- [[ColumnType:{DescID: 104, ColumnFamilyID: 0, ColumnID: 2, TypeName: INT4}, ABSENT], ABSENT] -> PUBLIC
- [[CheckConstraint:{DescID: 104, IndexID: 1, ConstraintID: 2, ReferencedColumnIDs: [2]}, TRANSIENT_ABSENT], WRITE_ONLY] -> ABSENT
- ops:
- *scop.UndoAllInTxnImmediateMutationOpSideEffects
- {}
-PreCommitPhase stage 2 of 2 with 3 MutationType ops
- transitions:
- [[CheckConstraint:{DescID: 104, IndexID: 1, ConstraintID: 2, ReferencedColumnIDs: [2]}, TRANSIENT_ABSENT], ABSENT] -> WRITE_ONLY
- ops:
- *scop.AddCheckConstraint
- CheckExpr: (CAST(CAST(c1 AS INT2) AS INT4) = c1)
- ColumnIDs:
- - 2
- ConstraintID: 2
- TableID: 104
- Validity: 2
- *scop.SetJobStateOnDescriptor
- DescriptorID: 104
- Initialize: true
- *scop.CreateSchemaChangerJob
- Authorization:
- AppName: $ internal-test
- UserName: root
- DescriptorIDs:
- - 104
- JobID: 1
- RunningStatus: PostCommitPhase stage 1 of 1 with 1 ValidationType op pending
- Statements:
- - statement: ALTER TABLE defaultdb.act ALTER COLUMN c1 SET DATA TYPE INT2
- redactedstatement: ALTER TABLE ‹defaultdb›.public.‹act› ALTER COLUMN ‹c1› SET DATA TYPE INT2
- statementtag: ALTER TABLE
-PostCommitPhase stage 1 of 1 with 1 ValidationType op
- transitions:
- [[CheckConstraint:{DescID: 104, IndexID: 1, ConstraintID: 2, ReferencedColumnIDs: [2]}, TRANSIENT_ABSENT], WRITE_ONLY] -> VALIDATED
- ops:
- *scop.ValidateConstraint
- ConstraintID: 2
- IndexIDForValidation: 1
- TableID: 104
-PostCommitNonRevertiblePhase stage 1 of 3 with 3 MutationType ops
- transitions:
- [[ColumnType:{DescID: 104, ColumnFamilyID: 0, ColumnID: 2, TypeName: INT4}, ABSENT], PUBLIC] -> ABSENT
- [[CheckConstraint:{DescID: 104, IndexID: 1, ConstraintID: 2, ReferencedColumnIDs: [2]}, TRANSIENT_ABSENT], VALIDATED] -> PUBLIC
- ops:
- *scop.MakeValidatedCheckConstraintPublic
- ConstraintID: 2
- TableID: 104
- *scop.SetJobStateOnDescriptor
- DescriptorID: 104
- *scop.UpdateSchemaChangerJob
- IsNonCancelable: true
- JobID: 1
-PostCommitNonRevertiblePhase stage 2 of 3 with 4 MutationType ops
- transitions:
- [[ColumnType:{DescID: 104, ColumnFamilyID: 0, ColumnID: 2, TypeName: INT2}, PUBLIC], ABSENT] -> PUBLIC
- [[CheckConstraint:{DescID: 104, IndexID: 1, ConstraintID: 2, ReferencedColumnIDs: [2]}, TRANSIENT_ABSENT], PUBLIC] -> TRANSIENT_VALIDATED
- ops:
- *scop.MakePublicCheckConstraintValidated
- ConstraintID: 2
- TableID: 104
- *scop.UpsertColumnType
- ColumnType:
- ColumnID: 2
- ElementCreationMetadata:
- in231OrLater: true
- in243OrLater: true
- IsNullable: true
- TableID: 104
- TypeT:
- Type:
- family: IntFamily
- oid: 21
- width: 16
- TypeName: INT2
- *scop.SetJobStateOnDescriptor
- DescriptorID: 104
- *scop.UpdateSchemaChangerJob
- IsNonCancelable: true
- JobID: 1
-PostCommitNonRevertiblePhase stage 3 of 3 with 3 MutationType ops
- transitions:
- [[CheckConstraint:{DescID: 104, IndexID: 1, ConstraintID: 2, ReferencedColumnIDs: [2]}, TRANSIENT_ABSENT], TRANSIENT_VALIDATED] -> TRANSIENT_ABSENT
- ops:
- *scop.RemoveCheckConstraint
- ConstraintID: 2
- TableID: 104
- *scop.RemoveJobStateFromDescriptor
- DescriptorID: 104
- JobID: 1
- *scop.UpdateSchemaChangerJob
- DescriptorIDsToRemove:
- - 104
- IsNonCancelable: true
- JobID: 1
-
-ops
-ALTER TABLE defaultdb.act ALTER COLUMN c1 SET DATA TYPE TEXT;
-----
-StatementPhase stage 1 of 1 with 16 MutationType ops
- transitions:
- [[PrimaryIndex:{DescID: 104, IndexID: 2, ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1}, TRANSIENT_ABSENT], ABSENT] -> BACKFILL_ONLY
- [[IndexColumn:{DescID: 104, ColumnID: 1, IndexID: 2}, TRANSIENT_ABSENT], ABSENT] -> PUBLIC
- [[IndexColumn:{DescID: 104, ColumnID: 2, IndexID: 2}, TRANSIENT_ABSENT], ABSENT] -> PUBLIC
- [[IndexData:{DescID: 104, IndexID: 2}, TRANSIENT_ABSENT], ABSENT] -> PUBLIC
- [[TemporaryIndex:{DescID: 104, IndexID: 3, ConstraintID: 3, SourceIndexID: 1}, TRANSIENT_ABSENT], ABSENT] -> DELETE_ONLY
- [[IndexColumn:{DescID: 104, ColumnID: 1, IndexID: 3}, TRANSIENT_ABSENT], ABSENT] -> PUBLIC
- [[IndexColumn:{DescID: 104, ColumnID: 2, IndexID: 3}, TRANSIENT_ABSENT], ABSENT] -> PUBLIC
- [[PrimaryIndex:{DescID: 104, IndexID: 4, ConstraintID: 4, TemporaryIndexID: 5, SourceIndexID: 2}, PUBLIC], ABSENT] -> BACKFILL_ONLY
- [[IndexColumn:{DescID: 104, ColumnID: 1, IndexID: 4}, PUBLIC], ABSENT] -> PUBLIC
- [[IndexData:{DescID: 104, IndexID: 4}, PUBLIC], ABSENT] -> PUBLIC
- [[Column:{DescID: 104, ColumnID: 3}, PUBLIC], ABSENT] -> DELETE_ONLY
- [[ColumnName:{DescID: 104, Name: c1, ColumnID: 3}, PUBLIC], ABSENT] -> PUBLIC
- [[ColumnType:{DescID: 104, ColumnFamilyID: 0, ColumnID: 3, TypeName: STRING}, PUBLIC], ABSENT] -> PUBLIC
- [[ColumnComputeExpression:{DescID: 104, ColumnID: 3}, TRANSIENT_ABSENT], ABSENT] -> PUBLIC
- [[IndexColumn:{DescID: 104, ColumnID: 3, IndexID: 2}, TRANSIENT_ABSENT], ABSENT] -> PUBLIC
- [[IndexColumn:{DescID: 104, ColumnID: 3, IndexID: 3}, TRANSIENT_ABSENT], ABSENT] -> PUBLIC
- [[IndexColumn:{DescID: 104, ColumnID: 3, IndexID: 4}, PUBLIC], ABSENT] -> PUBLIC
- [[ColumnName:{DescID: 104, Name: c1_shadow, ColumnID: 2}, TRANSIENT_ABSENT], ABSENT] -> PUBLIC
- ops:
- *scop.MakeAbsentColumnDeleteOnly
- Column:
- ColumnID: 3
- PgAttributeNum: 2
- TableID: 104
- *scop.SetColumnName
- ColumnID: 3
- Name: c1
- TableID: 104
- *scop.UpsertColumnType
- ColumnType:
- ColumnID: 3
- ElementCreationMetadata:
- in231OrLater: true
- in243OrLater: true
- IsNullable: true
- TableID: 104
- TypeT:
- Type:
- family: StringFamily
- oid: 25
- TypeName: STRING
- *scop.AddColumnComputeExpression
- ComputeExpression:
- ColumnID: 3
- Expression:
- Expr: c1::STRING
- ReferencedColumnIDs:
- - 2
- TableID: 104
- *scop.SetColumnName
- ColumnID: 2
- Name: c1_shadow
- TableID: 104
- *scop.MakeAbsentIndexBackfilling
- Index:
- ConstraintID: 2
- IndexID: 2
- IsUnique: true
- SourceIndexID: 1
- TableID: 104
- TemporaryIndexID: 3
- *scop.AddColumnToIndex
- ColumnID: 1
- IndexID: 2
- TableID: 104
- *scop.AddColumnToIndex
- ColumnID: 2
- IndexID: 2
- Kind: 2
- TableID: 104
- *scop.MakeAbsentTempIndexDeleteOnly
- Index:
- ConstraintID: 3
- IndexID: 3
- IsUnique: true
- SourceIndexID: 1
- TableID: 104
- *scop.AddColumnToIndex
- ColumnID: 1
- IndexID: 3
- TableID: 104
- *scop.AddColumnToIndex
- ColumnID: 2
- IndexID: 3
- Kind: 2
- TableID: 104
- *scop.MakeAbsentIndexBackfilling
- Index:
- ConstraintID: 4
- IndexID: 4
- IsUnique: true
- SourceIndexID: 2
- TableID: 104
- TemporaryIndexID: 5
- *scop.AddColumnToIndex
- ColumnID: 1
- IndexID: 4
- TableID: 104
- *scop.AddColumnToIndex
- ColumnID: 3
- IndexID: 2
- Kind: 2
- Ordinal: 1
- TableID: 104
- *scop.AddColumnToIndex
- ColumnID: 3
- IndexID: 3
- Kind: 2
- Ordinal: 1
- TableID: 104
- *scop.AddColumnToIndex
- ColumnID: 3
- IndexID: 4
- Kind: 2
- TableID: 104
-PreCommitPhase stage 1 of 2 with 1 MutationType op
- transitions:
- [[PrimaryIndex:{DescID: 104, IndexID: 2, ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1}, TRANSIENT_ABSENT], BACKFILL_ONLY] -> ABSENT
- [[IndexColumn:{DescID: 104, ColumnID: 1, IndexID: 2}, TRANSIENT_ABSENT], PUBLIC] -> ABSENT
- [[IndexColumn:{DescID: 104, ColumnID: 2, IndexID: 2}, TRANSIENT_ABSENT], PUBLIC] -> ABSENT
- [[IndexData:{DescID: 104, IndexID: 2}, TRANSIENT_ABSENT], PUBLIC] -> ABSENT
- [[TemporaryIndex:{DescID: 104, IndexID: 3, ConstraintID: 3, SourceIndexID: 1}, TRANSIENT_ABSENT], DELETE_ONLY] -> ABSENT
- [[IndexColumn:{DescID: 104, ColumnID: 1, IndexID: 3}, TRANSIENT_ABSENT], PUBLIC] -> ABSENT
- [[IndexColumn:{DescID: 104, ColumnID: 2, IndexID: 3}, TRANSIENT_ABSENT], PUBLIC] -> ABSENT
- [[PrimaryIndex:{DescID: 104, IndexID: 4, ConstraintID: 4, TemporaryIndexID: 5, SourceIndexID: 2}, PUBLIC], BACKFILL_ONLY] -> ABSENT
- [[IndexColumn:{DescID: 104, ColumnID: 1, IndexID: 4}, PUBLIC], PUBLIC] -> ABSENT
- [[IndexData:{DescID: 104, IndexID: 4}, PUBLIC], PUBLIC] -> ABSENT
- [[Column:{DescID: 104, ColumnID: 3}, PUBLIC], DELETE_ONLY] -> ABSENT
- [[ColumnName:{DescID: 104, Name: c1, ColumnID: 3}, PUBLIC], PUBLIC] -> ABSENT
- [[ColumnType:{DescID: 104, ColumnFamilyID: 0, ColumnID: 3, TypeName: STRING}, PUBLIC], PUBLIC] -> ABSENT
- [[ColumnComputeExpression:{DescID: 104, ColumnID: 3}, TRANSIENT_ABSENT], PUBLIC] -> ABSENT
- [[IndexColumn:{DescID: 104, ColumnID: 3, IndexID: 2}, TRANSIENT_ABSENT], PUBLIC] -> ABSENT
- [[IndexColumn:{DescID: 104, ColumnID: 3, IndexID: 3}, TRANSIENT_ABSENT], PUBLIC] -> ABSENT
- [[IndexColumn:{DescID: 104, ColumnID: 3, IndexID: 4}, PUBLIC], PUBLIC] -> ABSENT
- [[ColumnName:{DescID: 104, Name: c1_shadow, ColumnID: 2}, TRANSIENT_ABSENT], PUBLIC] -> ABSENT
- ops:
- *scop.UndoAllInTxnImmediateMutationOpSideEffects
- {}
-PreCommitPhase stage 2 of 2 with 21 MutationType ops
- transitions:
- [[PrimaryIndex:{DescID: 104, IndexID: 2, ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1}, TRANSIENT_ABSENT], ABSENT] -> BACKFILL_ONLY
- [[IndexColumn:{DescID: 104, ColumnID: 1, IndexID: 2}, TRANSIENT_ABSENT], ABSENT] -> PUBLIC
- [[IndexColumn:{DescID: 104, ColumnID: 2, IndexID: 2}, TRANSIENT_ABSENT], ABSENT] -> PUBLIC
- [[IndexData:{DescID: 104, IndexID: 2}, TRANSIENT_ABSENT], ABSENT] -> PUBLIC
- [[TemporaryIndex:{DescID: 104, IndexID: 3, ConstraintID: 3, SourceIndexID: 1}, TRANSIENT_ABSENT], ABSENT] -> DELETE_ONLY
- [[IndexColumn:{DescID: 104, ColumnID: 1, IndexID: 3}, TRANSIENT_ABSENT], ABSENT] -> PUBLIC
- [[IndexColumn:{DescID: 104, ColumnID: 2, IndexID: 3}, TRANSIENT_ABSENT], ABSENT] -> PUBLIC
- [[PrimaryIndex:{DescID: 104, IndexID: 4, ConstraintID: 4, TemporaryIndexID: 5, SourceIndexID: 2}, PUBLIC], ABSENT] -> BACKFILL_ONLY
- [[IndexColumn:{DescID: 104, ColumnID: 1, IndexID: 4}, PUBLIC], ABSENT] -> PUBLIC
- [[IndexData:{DescID: 104, IndexID: 4}, PUBLIC], ABSENT] -> PUBLIC
- [[Column:{DescID: 104, ColumnID: 3}, PUBLIC], ABSENT] -> DELETE_ONLY
- [[ColumnName:{DescID: 104, Name: c1, ColumnID: 3}, PUBLIC], ABSENT] -> PUBLIC
- [[ColumnType:{DescID: 104, ColumnFamilyID: 0, ColumnID: 3, TypeName: STRING}, PUBLIC], ABSENT] -> PUBLIC
- [[ColumnComputeExpression:{DescID: 104, ColumnID: 3}, TRANSIENT_ABSENT], ABSENT] -> PUBLIC
- [[IndexColumn:{DescID: 104, ColumnID: 3, IndexID: 2}, TRANSIENT_ABSENT], ABSENT] -> PUBLIC
- [[IndexColumn:{DescID: 104, ColumnID: 3, IndexID: 3}, TRANSIENT_ABSENT], ABSENT] -> PUBLIC
- [[IndexColumn:{DescID: 104, ColumnID: 3, IndexID: 4}, PUBLIC], ABSENT] -> PUBLIC
- [[ColumnName:{DescID: 104, Name: c1_shadow, ColumnID: 2}, TRANSIENT_ABSENT], ABSENT] -> PUBLIC
- ops:
- *scop.MakeAbsentColumnDeleteOnly
- Column:
- ColumnID: 3
- PgAttributeNum: 2
- TableID: 104
- *scop.SetColumnName
- ColumnID: 3
- Name: c1
- TableID: 104
- *scop.UpsertColumnType
- ColumnType:
- ColumnID: 3
- ElementCreationMetadata:
- in231OrLater: true
- in243OrLater: true
- IsNullable: true
- TableID: 104
- TypeT:
- Type:
- family: StringFamily
- oid: 25
- TypeName: STRING
- *scop.AddColumnComputeExpression
- ComputeExpression:
- ColumnID: 3
- Expression:
- Expr: c1::STRING
- ReferencedColumnIDs:
- - 2
- TableID: 104
- *scop.SetColumnName
- ColumnID: 2
- Name: c1_shadow
- TableID: 104
- *scop.MakeAbsentIndexBackfilling
- Index:
- ConstraintID: 2
- IndexID: 2
- IsUnique: true
- SourceIndexID: 1
- TableID: 104
- TemporaryIndexID: 3
- *scop.MaybeAddSplitForIndex
- IndexID: 2
- TableID: 104
- *scop.AddColumnToIndex
- ColumnID: 1
- IndexID: 2
- TableID: 104
- *scop.AddColumnToIndex
- ColumnID: 2
- IndexID: 2
- Kind: 2
- TableID: 104
- *scop.MakeAbsentTempIndexDeleteOnly
- Index:
- ConstraintID: 3
- IndexID: 3
- IsUnique: true
- SourceIndexID: 1
- TableID: 104
- *scop.MaybeAddSplitForIndex
- IndexID: 3
- TableID: 104
- *scop.AddColumnToIndex
- ColumnID: 1
- IndexID: 3
- TableID: 104
- *scop.AddColumnToIndex
- ColumnID: 2
- IndexID: 3
- Kind: 2
- TableID: 104
- *scop.MakeAbsentIndexBackfilling
- Index:
- ConstraintID: 4
- IndexID: 4
- IsUnique: true
- SourceIndexID: 2
- TableID: 104
- TemporaryIndexID: 5
- *scop.MaybeAddSplitForIndex
- IndexID: 4
- TableID: 104
- *scop.AddColumnToIndex
- ColumnID: 1
- IndexID: 4
- TableID: 104
- *scop.AddColumnToIndex
- ColumnID: 3
- IndexID: 2
- Kind: 2
- Ordinal: 1
- TableID: 104
- *scop.AddColumnToIndex
- ColumnID: 3
- IndexID: 3
- Kind: 2
- Ordinal: 1
- TableID: 104
- *scop.AddColumnToIndex
- ColumnID: 3
- IndexID: 4
- Kind: 2
- TableID: 104
- *scop.SetJobStateOnDescriptor
- DescriptorID: 104
- Initialize: true
- *scop.CreateSchemaChangerJob
- Authorization:
- AppName: $ internal-test
- UserName: root
- DescriptorIDs:
- - 104
- JobID: 1
- RunningStatus: PostCommitPhase stage 1 of 15 with 2 MutationType ops pending
- Statements:
- - statement: ALTER TABLE defaultdb.act ALTER COLUMN c1 SET DATA TYPE STRING
- redactedstatement: ALTER TABLE ‹defaultdb›.public.‹act› ALTER COLUMN ‹c1› SET DATA TYPE STRING
- statementtag: ALTER TABLE
-PostCommitPhase stage 1 of 15 with 4 MutationType ops
- transitions:
- [[TemporaryIndex:{DescID: 104, IndexID: 3, ConstraintID: 3, SourceIndexID: 1}, TRANSIENT_ABSENT], DELETE_ONLY] -> WRITE_ONLY
- [[IndexData:{DescID: 104, IndexID: 3}, TRANSIENT_ABSENT], ABSENT] -> PUBLIC
- [[Column:{DescID: 104, ColumnID: 3}, PUBLIC], DELETE_ONLY] -> WRITE_ONLY
- ops:
- *scop.MakeDeleteOnlyColumnWriteOnly
- ColumnID: 3
- TableID: 104
- *scop.MakeDeleteOnlyIndexWriteOnly
- IndexID: 3
- TableID: 104
- *scop.SetJobStateOnDescriptor
- DescriptorID: 104
- *scop.UpdateSchemaChangerJob
- JobID: 1
-PostCommitPhase stage 2 of 15 with 1 BackfillType op
- transitions:
- [[PrimaryIndex:{DescID: 104, IndexID: 2, ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1}, TRANSIENT_ABSENT], BACKFILL_ONLY] -> BACKFILLED
- ops:
- *scop.BackfillIndex
- IndexID: 2
- SourceIndexID: 1
- TableID: 104
-PostCommitPhase stage 3 of 15 with 3 MutationType ops
- transitions:
- [[PrimaryIndex:{DescID: 104, IndexID: 2, ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1}, TRANSIENT_ABSENT], BACKFILLED] -> DELETE_ONLY
- ops:
- *scop.MakeBackfillingIndexDeleteOnly
- IndexID: 2
- TableID: 104
- *scop.SetJobStateOnDescriptor
- DescriptorID: 104
- *scop.UpdateSchemaChangerJob
- JobID: 1
-PostCommitPhase stage 4 of 15 with 3 MutationType ops
- transitions:
- [[PrimaryIndex:{DescID: 104, IndexID: 2, ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1}, TRANSIENT_ABSENT], DELETE_ONLY] -> MERGE_ONLY
- ops:
- *scop.MakeBackfilledIndexMerging
- IndexID: 2
- TableID: 104
- *scop.SetJobStateOnDescriptor
- DescriptorID: 104
- *scop.UpdateSchemaChangerJob
- JobID: 1
-PostCommitPhase stage 5 of 15 with 1 BackfillType op
- transitions:
- [[PrimaryIndex:{DescID: 104, IndexID: 2, ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1}, TRANSIENT_ABSENT], MERGE_ONLY] -> MERGED
- ops:
- *scop.MergeIndex
- BackfilledIndexID: 2
- TableID: 104
- TemporaryIndexID: 3
-PostCommitPhase stage 6 of 15 with 4 MutationType ops
- transitions:
- [[PrimaryIndex:{DescID: 104, IndexID: 2, ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1}, TRANSIENT_ABSENT], MERGED] -> WRITE_ONLY
- [[TemporaryIndex:{DescID: 104, IndexID: 3, ConstraintID: 3, SourceIndexID: 1}, TRANSIENT_ABSENT], WRITE_ONLY] -> TRANSIENT_DELETE_ONLY
- ops:
- *scop.MakeWriteOnlyIndexDeleteOnly
- IndexID: 3
- TableID: 104
- *scop.MakeMergedIndexWriteOnly
- IndexID: 2
- TableID: 104
- *scop.SetJobStateOnDescriptor
- DescriptorID: 104
- *scop.UpdateSchemaChangerJob
- JobID: 1
-PostCommitPhase stage 7 of 15 with 1 ValidationType op
- transitions:
- [[PrimaryIndex:{DescID: 104, IndexID: 2, ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1}, TRANSIENT_ABSENT], WRITE_ONLY] -> VALIDATED
- ops:
- *scop.ValidateIndex
- IndexID: 2
- TableID: 104
-PostCommitPhase stage 8 of 15 with 10 MutationType ops
- transitions:
- [[PrimaryIndex:{DescID: 104, IndexID: 1, ConstraintID: 1}, ABSENT], PUBLIC] -> VALIDATED
- [[IndexName:{DescID: 104, Name: act_pkey, IndexID: 1}, ABSENT], PUBLIC] -> ABSENT
- [[PrimaryIndex:{DescID: 104, IndexID: 2, ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1}, TRANSIENT_ABSENT], VALIDATED] -> PUBLIC
- [[IndexName:{DescID: 104, Name: act_pkey, IndexID: 2}, TRANSIENT_ABSENT], ABSENT] -> PUBLIC
- [[TemporaryIndex:{DescID: 104, IndexID: 5, ConstraintID: 5, SourceIndexID: 2}, TRANSIENT_ABSENT], ABSENT] -> DELETE_ONLY
- [[IndexColumn:{DescID: 104, ColumnID: 1, IndexID: 5}, TRANSIENT_ABSENT], ABSENT] -> PUBLIC
- [[IndexColumn:{DescID: 104, ColumnID: 3, IndexID: 5}, TRANSIENT_ABSENT], ABSENT] -> PUBLIC
- ops:
- *scop.MakePublicPrimaryIndexWriteOnly
- IndexID: 1
- TableID: 104
- *scop.SetIndexName
- IndexID: 1
- Name: crdb_internal_index_1_name_placeholder
- TableID: 104
- *scop.SetIndexName
- IndexID: 2
- Name: act_pkey
- TableID: 104
- *scop.MakeValidatedPrimaryIndexPublic
- IndexID: 2
- TableID: 104
- *scop.MakeAbsentTempIndexDeleteOnly
- Index:
- ConstraintID: 5
- IndexID: 5
- IsUnique: true
- SourceIndexID: 2
- TableID: 104
- *scop.MaybeAddSplitForIndex
- IndexID: 5
- TableID: 104
- *scop.AddColumnToIndex
- ColumnID: 1
- IndexID: 5
- TableID: 104
- *scop.AddColumnToIndex
- ColumnID: 3
- IndexID: 5
- Kind: 2
- TableID: 104
- *scop.SetJobStateOnDescriptor
- DescriptorID: 104
- *scop.UpdateSchemaChangerJob
- JobID: 1
-PostCommitPhase stage 9 of 15 with 3 MutationType ops
- transitions:
- [[TemporaryIndex:{DescID: 104, IndexID: 5, ConstraintID: 5, SourceIndexID: 2}, TRANSIENT_ABSENT], DELETE_ONLY] -> WRITE_ONLY
- [[IndexData:{DescID: 104, IndexID: 5}, TRANSIENT_ABSENT], ABSENT] -> PUBLIC
- ops:
- *scop.MakeDeleteOnlyIndexWriteOnly
- IndexID: 5
- TableID: 104
- *scop.SetJobStateOnDescriptor
- DescriptorID: 104
- *scop.UpdateSchemaChangerJob
- JobID: 1
-PostCommitPhase stage 10 of 15 with 1 BackfillType op
- transitions:
- [[PrimaryIndex:{DescID: 104, IndexID: 4, ConstraintID: 4, TemporaryIndexID: 5, SourceIndexID: 2}, PUBLIC], BACKFILL_ONLY] -> BACKFILLED
- ops:
- *scop.BackfillIndex
- IndexID: 4
- SourceIndexID: 2
- TableID: 104
-PostCommitPhase stage 11 of 15 with 3 MutationType ops
- transitions:
- [[PrimaryIndex:{DescID: 104, IndexID: 4, ConstraintID: 4, TemporaryIndexID: 5, SourceIndexID: 2}, PUBLIC], BACKFILLED] -> DELETE_ONLY
- ops:
- *scop.MakeBackfillingIndexDeleteOnly
- IndexID: 4
- TableID: 104
- *scop.SetJobStateOnDescriptor
- DescriptorID: 104
- *scop.UpdateSchemaChangerJob
- JobID: 1
-PostCommitPhase stage 12 of 15 with 3 MutationType ops
- transitions:
- [[PrimaryIndex:{DescID: 104, IndexID: 4, ConstraintID: 4, TemporaryIndexID: 5, SourceIndexID: 2}, PUBLIC], DELETE_ONLY] -> MERGE_ONLY
- ops:
- *scop.MakeBackfilledIndexMerging
- IndexID: 4
- TableID: 104
- *scop.SetJobStateOnDescriptor
- DescriptorID: 104
- *scop.UpdateSchemaChangerJob
- JobID: 1
-PostCommitPhase stage 13 of 15 with 1 BackfillType op
- transitions:
- [[PrimaryIndex:{DescID: 104, IndexID: 4, ConstraintID: 4, TemporaryIndexID: 5, SourceIndexID: 2}, PUBLIC], MERGE_ONLY] -> MERGED
- ops:
- *scop.MergeIndex
- BackfilledIndexID: 4
- TableID: 104
- TemporaryIndexID: 5
-PostCommitPhase stage 14 of 15 with 4 MutationType ops
- transitions:
- [[PrimaryIndex:{DescID: 104, IndexID: 4, ConstraintID: 4, TemporaryIndexID: 5, SourceIndexID: 2}, PUBLIC], MERGED] -> WRITE_ONLY
- [[TemporaryIndex:{DescID: 104, IndexID: 5, ConstraintID: 5, SourceIndexID: 2}, TRANSIENT_ABSENT], WRITE_ONLY] -> TRANSIENT_DELETE_ONLY
- ops:
- *scop.MakeWriteOnlyIndexDeleteOnly
- IndexID: 5
- TableID: 104
- *scop.MakeMergedIndexWriteOnly
- IndexID: 4
- TableID: 104
- *scop.SetJobStateOnDescriptor
- DescriptorID: 104
- *scop.UpdateSchemaChangerJob
- JobID: 1
-PostCommitPhase stage 15 of 15 with 1 ValidationType op
- transitions:
- [[PrimaryIndex:{DescID: 104, IndexID: 4, ConstraintID: 4, TemporaryIndexID: 5, SourceIndexID: 2}, PUBLIC], WRITE_ONLY] -> VALIDATED
- ops:
- *scop.ValidateIndex
- IndexID: 4
- TableID: 104
-PostCommitNonRevertiblePhase stage 1 of 4 with 16 MutationType ops
- transitions:
- [[Column:{DescID: 104, ColumnID: 2}, ABSENT], PUBLIC] -> WRITE_ONLY
- [[ColumnName:{DescID: 104, Name: c1, ColumnID: 2}, ABSENT], PUBLIC] -> ABSENT
- [[IndexColumn:{DescID: 104, ColumnID: 1, IndexID: 1}, ABSENT], PUBLIC] -> ABSENT
- [[IndexColumn:{DescID: 104, ColumnID: 2, IndexID: 1}, ABSENT], PUBLIC] -> ABSENT
- [[PrimaryIndex:{DescID: 104, IndexID: 1, ConstraintID: 1}, ABSENT], VALIDATED] -> DELETE_ONLY
- [[TemporaryIndex:{DescID: 104, IndexID: 3, ConstraintID: 3, SourceIndexID: 1}, TRANSIENT_ABSENT], TRANSIENT_DELETE_ONLY] -> TRANSIENT_ABSENT
- [[IndexColumn:{DescID: 104, ColumnID: 1, IndexID: 3}, TRANSIENT_ABSENT], PUBLIC] -> TRANSIENT_ABSENT
- [[IndexColumn:{DescID: 104, ColumnID: 2, IndexID: 3}, TRANSIENT_ABSENT], PUBLIC] -> TRANSIENT_ABSENT
- [[TemporaryIndex:{DescID: 104, IndexID: 5, ConstraintID: 5, SourceIndexID: 2}, TRANSIENT_ABSENT], TRANSIENT_DELETE_ONLY] -> TRANSIENT_ABSENT
- [[IndexColumn:{DescID: 104, ColumnID: 1, IndexID: 5}, TRANSIENT_ABSENT], PUBLIC] -> TRANSIENT_ABSENT
- [[ColumnComputeExpression:{DescID: 104, ColumnID: 3}, TRANSIENT_ABSENT], PUBLIC] -> TRANSIENT_ABSENT
- [[IndexColumn:{DescID: 104, ColumnID: 3, IndexID: 3}, TRANSIENT_ABSENT], PUBLIC] -> TRANSIENT_ABSENT
- [[IndexColumn:{DescID: 104, ColumnID: 3, IndexID: 5}, TRANSIENT_ABSENT], PUBLIC] -> TRANSIENT_ABSENT
- [[ColumnName:{DescID: 104, Name: c1_shadow, ColumnID: 2}, TRANSIENT_ABSENT], PUBLIC] -> TRANSIENT_ABSENT
- ops:
- *scop.RemoveColumnFromIndex
- ColumnID: 1
- IndexID: 3
- TableID: 104
- *scop.RemoveColumnFromIndex
- ColumnID: 1
- IndexID: 5
- TableID: 104
- *scop.RemoveColumnComputeExpression
- ColumnID: 3
- TableID: 104
- *scop.RemoveColumnFromIndex
- ColumnID: 3
- IndexID: 3
- Kind: 2
- Ordinal: 1
- TableID: 104
- *scop.RemoveColumnFromIndex
- ColumnID: 3
- IndexID: 5
- Kind: 2
- TableID: 104
- *scop.MakePublicColumnWriteOnly
- ColumnID: 2
- TableID: 104
- *scop.SetColumnName
- ColumnID: 2
- Name: crdb_internal_column_2_name_placeholder
- TableID: 104
- *scop.MakeWriteOnlyIndexDeleteOnly
- IndexID: 1
- TableID: 104
- *scop.RemoveColumnFromIndex
- ColumnID: 2
- IndexID: 3
- Kind: 2
- TableID: 104
- *scop.MakeIndexAbsent
- IndexID: 5
- TableID: 104
- *scop.SetColumnName
- ColumnID: 2
- Name: c1
- TableID: 104
- *scop.RemoveColumnFromIndex
- ColumnID: 1
- IndexID: 1
- TableID: 104
- *scop.RemoveColumnFromIndex
- ColumnID: 2
- IndexID: 1
- Kind: 2
- TableID: 104
- *scop.MakeIndexAbsent
- IndexID: 3
- TableID: 104
- *scop.SetJobStateOnDescriptor
- DescriptorID: 104
- *scop.UpdateSchemaChangerJob
- IsNonCancelable: true
- JobID: 1
-PostCommitNonRevertiblePhase stage 2 of 4 with 10 MutationType ops
- transitions:
- [[Column:{DescID: 104, ColumnID: 2}, ABSENT], WRITE_ONLY] -> DELETE_ONLY
- [[PrimaryIndex:{DescID: 104, IndexID: 1, ConstraintID: 1}, ABSENT], DELETE_ONLY] -> ABSENT
- [[PrimaryIndex:{DescID: 104, IndexID: 2, ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1}, TRANSIENT_ABSENT], PUBLIC] -> TRANSIENT_VALIDATED
- [[IndexName:{DescID: 104, Name: act_pkey, IndexID: 2}, TRANSIENT_ABSENT], PUBLIC] -> TRANSIENT_ABSENT
- [[PrimaryIndex:{DescID: 104, IndexID: 4, ConstraintID: 4, TemporaryIndexID: 5, SourceIndexID: 2}, PUBLIC], VALIDATED] -> PUBLIC
- [[IndexName:{DescID: 104, Name: act_pkey, IndexID: 4}, PUBLIC], ABSENT] -> PUBLIC
- [[Column:{DescID: 104, ColumnID: 3}, PUBLIC], WRITE_ONLY] -> PUBLIC
- ops:
- *scop.MakeWriteOnlyColumnDeleteOnly
- ColumnID: 2
- TableID: 104
- *scop.MakeIndexAbsent
- IndexID: 1
- TableID: 104
- *scop.MakePublicPrimaryIndexWriteOnly
- IndexID: 2
- TableID: 104
- *scop.SetIndexName
- IndexID: 2
- Name: crdb_internal_index_2_name_placeholder
- TableID: 104
- *scop.SetIndexName
- IndexID: 4
- Name: act_pkey
- TableID: 104
- *scop.MakeValidatedPrimaryIndexPublic
- IndexID: 4
- TableID: 104
- *scop.MakeWriteOnlyColumnPublic
- ColumnID: 3
- TableID: 104
- *scop.RefreshStats
- TableID: 104
- *scop.SetJobStateOnDescriptor
- DescriptorID: 104
- *scop.UpdateSchemaChangerJob
- IsNonCancelable: true
- JobID: 1
-PostCommitNonRevertiblePhase stage 3 of 4 with 6 MutationType ops
- transitions:
- [[PrimaryIndex:{DescID: 104, IndexID: 2, ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1}, TRANSIENT_ABSENT], TRANSIENT_VALIDATED] -> TRANSIENT_DELETE_ONLY
- [[IndexColumn:{DescID: 104, ColumnID: 1, IndexID: 2}, TRANSIENT_ABSENT], PUBLIC] -> TRANSIENT_ABSENT
- [[IndexColumn:{DescID: 104, ColumnID: 2, IndexID: 2}, TRANSIENT_ABSENT], PUBLIC] -> TRANSIENT_ABSENT
- [[IndexColumn:{DescID: 104, ColumnID: 3, IndexID: 2}, TRANSIENT_ABSENT], PUBLIC] -> TRANSIENT_ABSENT
- ops:
- *scop.MakeWriteOnlyIndexDeleteOnly
- IndexID: 2
- TableID: 104
- *scop.RemoveColumnFromIndex
- ColumnID: 1
- IndexID: 2
- TableID: 104
- *scop.RemoveColumnFromIndex
- ColumnID: 2
- IndexID: 2
- Kind: 2
- TableID: 104
- *scop.RemoveColumnFromIndex
- ColumnID: 3
- IndexID: 2
- Kind: 2
- Ordinal: 1
- TableID: 104
- *scop.SetJobStateOnDescriptor
- DescriptorID: 104
- *scop.UpdateSchemaChangerJob
- IsNonCancelable: true
- JobID: 1
-PostCommitNonRevertiblePhase stage 4 of 4 with 8 MutationType ops
- transitions:
- [[Column:{DescID: 104, ColumnID: 2}, ABSENT], DELETE_ONLY] -> ABSENT
- [[ColumnType:{DescID: 104, ColumnFamilyID: 0, ColumnID: 2, TypeName: INT4}, ABSENT], PUBLIC] -> ABSENT
- [[IndexData:{DescID: 104, IndexID: 1}, ABSENT], PUBLIC] -> ABSENT
- [[PrimaryIndex:{DescID: 104, IndexID: 2, ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1}, TRANSIENT_ABSENT], TRANSIENT_DELETE_ONLY] -> TRANSIENT_ABSENT
- [[IndexData:{DescID: 104, IndexID: 2}, TRANSIENT_ABSENT], PUBLIC] -> TRANSIENT_ABSENT
- [[IndexData:{DescID: 104, IndexID: 3}, TRANSIENT_ABSENT], PUBLIC] -> TRANSIENT_ABSENT
- [[IndexData:{DescID: 104, IndexID: 5}, TRANSIENT_ABSENT], PUBLIC] -> TRANSIENT_ABSENT
- ops:
- *scop.CreateGCJobForIndex
- IndexID: 1
- StatementForDropJob:
- Statement: ALTER TABLE defaultdb.public.act ALTER COLUMN c1 SET DATA TYPE STRING
- TableID: 104
- *scop.MakeIndexAbsent
- IndexID: 2
- TableID: 104
- *scop.CreateGCJobForIndex
- IndexID: 2
- StatementForDropJob:
- Statement: ALTER TABLE defaultdb.public.act ALTER COLUMN c1 SET DATA TYPE STRING
- TableID: 104
- *scop.CreateGCJobForIndex
- IndexID: 3
- StatementForDropJob:
- Statement: ALTER TABLE defaultdb.public.act ALTER COLUMN c1 SET DATA TYPE STRING
- TableID: 104
- *scop.CreateGCJobForIndex
- IndexID: 5
- StatementForDropJob:
- Statement: ALTER TABLE defaultdb.public.act ALTER COLUMN c1 SET DATA TYPE STRING
- TableID: 104
- *scop.MakeDeleteOnlyColumnAbsent
- ColumnID: 2
- TableID: 104
- *scop.RemoveJobStateFromDescriptor
- DescriptorID: 104
- JobID: 1
- *scop.UpdateSchemaChangerJob
- DescriptorIDsToRemove:
- - 104
- IsNonCancelable: true
- JobID: 1
diff --git a/pkg/sql/schemachanger/sctest/end_to_end.go b/pkg/sql/schemachanger/sctest/end_to_end.go
index 814e98f0efeb..4af5d76b58cf 100644
--- a/pkg/sql/schemachanger/sctest/end_to_end.go
+++ b/pkg/sql/schemachanger/sctest/end_to_end.go
@@ -139,7 +139,6 @@ func EndToEndSideEffects(t *testing.T, relTestCaseDir string, factory TestServer
sd.TempTablesEnabled = true
sd.ApplicationName = ""
sd.EnableUniqueWithoutIndexConstraints = true // this allows `ADD UNIQUE WITHOUT INDEX` in the testing suite.
- sd.AlterColumnTypeGeneralEnabled = true
})),
sctestdeps.WithTestingKnobs(&scexec.TestingKnobs{
BeforeStage: func(p scplan.Plan, stageIdx int) error {
diff --git a/pkg/sql/schemachanger/sctest_generated_test.go b/pkg/sql/schemachanger/sctest_generated_test.go
index 4205f7adf715..c44936da49dd 100644
--- a/pkg/sql/schemachanger/sctest_generated_test.go
+++ b/pkg/sql/schemachanger/sctest_generated_test.go
@@ -169,13 +169,6 @@ func TestEndToEndSideEffects_alter_table_alter_column_set_not_null(t *testing.T)
sctest.EndToEndSideEffects(t, path, sctest.SingleNodeTestClusterFactory{})
}
-func TestEndToEndSideEffects_alter_table_alter_column_type_general(t *testing.T) {
- defer leaktest.AfterTest(t)()
- defer log.Scope(t).Close(t)
- const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_column_type_general"
- sctest.EndToEndSideEffects(t, path, sctest.SingleNodeTestClusterFactory{})
-}
-
func TestEndToEndSideEffects_alter_table_alter_column_type_noop(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
@@ -694,13 +687,6 @@ func TestExecuteWithDMLInjection_alter_table_alter_column_set_not_null(t *testin
sctest.ExecuteWithDMLInjection(t, path, sctest.SingleNodeTestClusterFactory{})
}
-func TestExecuteWithDMLInjection_alter_table_alter_column_type_general(t *testing.T) {
- defer leaktest.AfterTest(t)()
- defer log.Scope(t).Close(t)
- const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_column_type_general"
- sctest.ExecuteWithDMLInjection(t, path, sctest.SingleNodeTestClusterFactory{})
-}
-
func TestExecuteWithDMLInjection_alter_table_alter_column_type_noop(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
@@ -1219,13 +1205,6 @@ func TestGenerateSchemaChangeCorpus_alter_table_alter_column_set_not_null(t *tes
sctest.GenerateSchemaChangeCorpus(t, path, sctest.SingleNodeTestClusterFactory{})
}
-func TestGenerateSchemaChangeCorpus_alter_table_alter_column_type_general(t *testing.T) {
- defer leaktest.AfterTest(t)()
- defer log.Scope(t).Close(t)
- const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_column_type_general"
- sctest.GenerateSchemaChangeCorpus(t, path, sctest.SingleNodeTestClusterFactory{})
-}
-
func TestGenerateSchemaChangeCorpus_alter_table_alter_column_type_noop(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
@@ -1744,13 +1723,6 @@ func TestPause_alter_table_alter_column_set_not_null(t *testing.T) {
sctest.Pause(t, path, sctest.SingleNodeTestClusterFactory{})
}
-func TestPause_alter_table_alter_column_type_general(t *testing.T) {
- defer leaktest.AfterTest(t)()
- defer log.Scope(t).Close(t)
- const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_column_type_general"
- sctest.Pause(t, path, sctest.SingleNodeTestClusterFactory{})
-}
-
func TestPause_alter_table_alter_column_type_noop(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
@@ -2269,13 +2241,6 @@ func TestPauseMixedVersion_alter_table_alter_column_set_not_null(t *testing.T) {
sctest.PauseMixedVersion(t, path, sctest.SingleNodeTestClusterFactory{})
}
-func TestPauseMixedVersion_alter_table_alter_column_type_general(t *testing.T) {
- defer leaktest.AfterTest(t)()
- defer log.Scope(t).Close(t)
- const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_column_type_general"
- sctest.PauseMixedVersion(t, path, sctest.SingleNodeTestClusterFactory{})
-}
-
func TestPauseMixedVersion_alter_table_alter_column_type_noop(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
@@ -2794,13 +2759,6 @@ func TestRollback_alter_table_alter_column_set_not_null(t *testing.T) {
sctest.Rollback(t, path, sctest.SingleNodeTestClusterFactory{})
}
-func TestRollback_alter_table_alter_column_type_general(t *testing.T) {
- defer leaktest.AfterTest(t)()
- defer log.Scope(t).Close(t)
- const path = "pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_column_type_general"
- sctest.Rollback(t, path, sctest.SingleNodeTestClusterFactory{})
-}
-
func TestRollback_alter_table_alter_column_type_noop(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
diff --git a/pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_column_type_general/alter_table_alter_column_type_general.definition b/pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_column_type_general/alter_table_alter_column_type_general.definition
deleted file mode 100644
index fe4067917e16..000000000000
--- a/pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_column_type_general/alter_table_alter_column_type_general.definition
+++ /dev/null
@@ -1,19 +0,0 @@
-setup
-CREATE TABLE t (i INT PRIMARY KEY, j TEXT);
-SET enable_experimental_alter_column_type_general=TRUE;
-INSERT INTO t VALUES (1,NULL),(2,'1'),(3,'2');
-----
-
-stage-exec phase=PostCommitPhase stage=:
-INSERT INTO t VALUES (10+$stageKey, '$stageKey');
-----
-
-# One row is expected to be added after each stage.
-stage-query phase=PostCommitPhase stage=:
-SELECT count(*)=$successfulStageCount FROM t WHERE i > 3;
-----
-true
-
-test
-ALTER TABLE t ALTER COLUMN j SET DATA TYPE BIGINT USING j::BIGINT;
-----
diff --git a/pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_column_type_general/alter_table_alter_column_type_general.explain b/pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_column_type_general/alter_table_alter_column_type_general.explain
deleted file mode 100644
index a81ea49f1f05..000000000000
--- a/pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_column_type_general/alter_table_alter_column_type_general.explain
+++ /dev/null
@@ -1,322 +0,0 @@
-/* setup */
-CREATE TABLE t (i INT PRIMARY KEY, j TEXT);
-SET enable_experimental_alter_column_type_general=TRUE;
-INSERT INTO t VALUES (1,NULL),(2,'1'),(3,'2');
-
-/* test */
-EXPLAIN (DDL) ALTER TABLE t ALTER COLUMN j SET DATA TYPE BIGINT USING j::BIGINT;
-----
-Schema change plan for ALTER TABLE ‹defaultdb›.‹public›.‹t› ALTER COLUMN ‹j› SET DATA TYPE INT8 USING ‹j›::INT8;
- ├── StatementPhase
- │ └── Stage 1 of 1 in StatementPhase
- │ ├── 7 elements transitioning toward PUBLIC
- │ │ ├── ABSENT → BACKFILL_ONLY PrimaryIndex:{DescID: 104 (t), IndexID: 4 (t_pkey+), ConstraintID: 4, TemporaryIndexID: 5, SourceIndexID: 2 (t_pkey~)}
- │ │ ├── ABSENT → PUBLIC IndexColumn:{DescID: 104 (t), ColumnID: 1 (i), IndexID: 4 (t_pkey+)}
- │ │ ├── ABSENT → PUBLIC IndexData:{DescID: 104 (t), IndexID: 4 (t_pkey+)}
- │ │ ├── ABSENT → DELETE_ONLY Column:{DescID: 104 (t), ColumnID: 3 (j+)}
- │ │ ├── ABSENT → PUBLIC ColumnName:{DescID: 104 (t), Name: "j", ColumnID: 3 (j+)}
- │ │ ├── ABSENT → PUBLIC ColumnType:{DescID: 104 (t), ColumnFamilyID: 0 (primary), ColumnID: 3 (j+), TypeName: "INT8"}
- │ │ └── ABSENT → PUBLIC IndexColumn:{DescID: 104 (t), ColumnID: 3 (j+), IndexID: 4 (t_pkey+)}
- │ ├── 11 elements transitioning toward TRANSIENT_ABSENT
- │ │ ├── ABSENT → BACKFILL_ONLY PrimaryIndex:{DescID: 104 (t), IndexID: 2 (t_pkey~), ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1 (t_pkey-)}
- │ │ ├── ABSENT → PUBLIC IndexColumn:{DescID: 104 (t), ColumnID: 1 (i), IndexID: 2 (t_pkey~)}
- │ │ ├── ABSENT → PUBLIC IndexColumn:{DescID: 104 (t), ColumnID: 2 (j-j_shadow~), IndexID: 2 (t_pkey~)}
- │ │ ├── ABSENT → PUBLIC IndexData:{DescID: 104 (t), IndexID: 2 (t_pkey~)}
- │ │ ├── ABSENT → DELETE_ONLY TemporaryIndex:{DescID: 104 (t), IndexID: 3, ConstraintID: 3, SourceIndexID: 1 (t_pkey-)}
- │ │ ├── ABSENT → PUBLIC IndexColumn:{DescID: 104 (t), ColumnID: 1 (i), IndexID: 3}
- │ │ ├── ABSENT → PUBLIC IndexColumn:{DescID: 104 (t), ColumnID: 2 (j-j_shadow~), IndexID: 3}
- │ │ ├── ABSENT → PUBLIC ColumnComputeExpression:{DescID: 104 (t), ColumnID: 3 (j+)}
- │ │ ├── ABSENT → PUBLIC IndexColumn:{DescID: 104 (t), ColumnID: 3 (j+), IndexID: 2 (t_pkey~)}
- │ │ ├── ABSENT → PUBLIC IndexColumn:{DescID: 104 (t), ColumnID: 3 (j+), IndexID: 3}
- │ │ └── ABSENT → PUBLIC ColumnName:{DescID: 104 (t), Name: "j_shadow", ColumnID: 2 (j-j_shadow~)}
- │ └── 16 Mutation operations
- │ ├── MakeAbsentColumnDeleteOnly {"Column":{"ColumnID":3,"PgAttributeNum":2,"TableID":104}}
- │ ├── SetColumnName {"ColumnID":3,"Name":"j","TableID":104}
- │ ├── UpsertColumnType {"ColumnType":{"ColumnID":3,"IsNullable":true,"TableID":104}}
- │ ├── AddColumnComputeExpression {"ComputeExpression":{"ColumnID":3,"TableID":104}}
- │ ├── SetColumnName {"ColumnID":2,"Name":"j_shadow","TableID":104}
- │ ├── MakeAbsentIndexBackfilling {"Index":{"ConstraintID":2,"IndexID":2,"IsUnique":true,"SourceIndexID":1,"TableID":104,"TemporaryIndexID":3}}
- │ ├── AddColumnToIndex {"ColumnID":1,"IndexID":2,"TableID":104}
- │ ├── AddColumnToIndex {"ColumnID":2,"IndexID":2,"Kind":2,"TableID":104}
- │ ├── MakeAbsentTempIndexDeleteOnly {"Index":{"ConstraintID":3,"IndexID":3,"IsUnique":true,"SourceIndexID":1,"TableID":104}}
- │ ├── AddColumnToIndex {"ColumnID":1,"IndexID":3,"TableID":104}
- │ ├── AddColumnToIndex {"ColumnID":2,"IndexID":3,"Kind":2,"TableID":104}
- │ ├── MakeAbsentIndexBackfilling {"Index":{"ConstraintID":4,"IndexID":4,"IsUnique":true,"SourceIndexID":2,"TableID":104,"TemporaryIndexID":5}}
- │ ├── AddColumnToIndex {"ColumnID":1,"IndexID":4,"TableID":104}
- │ ├── AddColumnToIndex {"ColumnID":3,"IndexID":2,"Kind":2,"Ordinal":1,"TableID":104}
- │ ├── AddColumnToIndex {"ColumnID":3,"IndexID":3,"Kind":2,"Ordinal":1,"TableID":104}
- │ └── AddColumnToIndex {"ColumnID":3,"IndexID":4,"Kind":2,"TableID":104}
- ├── PreCommitPhase
- │ ├── Stage 1 of 2 in PreCommitPhase
- │ │ ├── 7 elements transitioning toward PUBLIC
- │ │ │ ├── BACKFILL_ONLY → ABSENT PrimaryIndex:{DescID: 104 (t), IndexID: 4 (t_pkey+), ConstraintID: 4, TemporaryIndexID: 5, SourceIndexID: 2 (t_pkey~)}
- │ │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 1 (i), IndexID: 4 (t_pkey+)}
- │ │ │ ├── PUBLIC → ABSENT IndexData:{DescID: 104 (t), IndexID: 4 (t_pkey+)}
- │ │ │ ├── DELETE_ONLY → ABSENT Column:{DescID: 104 (t), ColumnID: 3 (j+)}
- │ │ │ ├── PUBLIC → ABSENT ColumnName:{DescID: 104 (t), Name: "j", ColumnID: 3 (j+)}
- │ │ │ ├── PUBLIC → ABSENT ColumnType:{DescID: 104 (t), ColumnFamilyID: 0 (primary), ColumnID: 3 (j+), TypeName: "INT8"}
- │ │ │ └── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 3 (j+), IndexID: 4 (t_pkey+)}
- │ │ ├── 11 elements transitioning toward TRANSIENT_ABSENT
- │ │ │ ├── BACKFILL_ONLY → ABSENT PrimaryIndex:{DescID: 104 (t), IndexID: 2 (t_pkey~), ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1 (t_pkey-)}
- │ │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 1 (i), IndexID: 2 (t_pkey~)}
- │ │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 2 (j-j_shadow~), IndexID: 2 (t_pkey~)}
- │ │ │ ├── PUBLIC → ABSENT IndexData:{DescID: 104 (t), IndexID: 2 (t_pkey~)}
- │ │ │ ├── DELETE_ONLY → ABSENT TemporaryIndex:{DescID: 104 (t), IndexID: 3, ConstraintID: 3, SourceIndexID: 1 (t_pkey-)}
- │ │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 1 (i), IndexID: 3}
- │ │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 2 (j-j_shadow~), IndexID: 3}
- │ │ │ ├── PUBLIC → ABSENT ColumnComputeExpression:{DescID: 104 (t), ColumnID: 3 (j+)}
- │ │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 3 (j+), IndexID: 2 (t_pkey~)}
- │ │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 3 (j+), IndexID: 3}
- │ │ │ └── PUBLIC → ABSENT ColumnName:{DescID: 104 (t), Name: "j_shadow", ColumnID: 2 (j-j_shadow~)}
- │ │ └── 1 Mutation operation
- │ │ └── UndoAllInTxnImmediateMutationOpSideEffects
- │ └── Stage 2 of 2 in PreCommitPhase
- │ ├── 7 elements transitioning toward PUBLIC
- │ │ ├── ABSENT → BACKFILL_ONLY PrimaryIndex:{DescID: 104 (t), IndexID: 4 (t_pkey+), ConstraintID: 4, TemporaryIndexID: 5, SourceIndexID: 2 (t_pkey~)}
- │ │ ├── ABSENT → PUBLIC IndexColumn:{DescID: 104 (t), ColumnID: 1 (i), IndexID: 4 (t_pkey+)}
- │ │ ├── ABSENT → PUBLIC IndexData:{DescID: 104 (t), IndexID: 4 (t_pkey+)}
- │ │ ├── ABSENT → DELETE_ONLY Column:{DescID: 104 (t), ColumnID: 3 (j+)}
- │ │ ├── ABSENT → PUBLIC ColumnName:{DescID: 104 (t), Name: "j", ColumnID: 3 (j+)}
- │ │ ├── ABSENT → PUBLIC ColumnType:{DescID: 104 (t), ColumnFamilyID: 0 (primary), ColumnID: 3 (j+), TypeName: "INT8"}
- │ │ └── ABSENT → PUBLIC IndexColumn:{DescID: 104 (t), ColumnID: 3 (j+), IndexID: 4 (t_pkey+)}
- │ ├── 11 elements transitioning toward TRANSIENT_ABSENT
- │ │ ├── ABSENT → BACKFILL_ONLY PrimaryIndex:{DescID: 104 (t), IndexID: 2 (t_pkey~), ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1 (t_pkey-)}
- │ │ ├── ABSENT → PUBLIC IndexColumn:{DescID: 104 (t), ColumnID: 1 (i), IndexID: 2 (t_pkey~)}
- │ │ ├── ABSENT → PUBLIC IndexColumn:{DescID: 104 (t), ColumnID: 2 (j-j_shadow~), IndexID: 2 (t_pkey~)}
- │ │ ├── ABSENT → PUBLIC IndexData:{DescID: 104 (t), IndexID: 2 (t_pkey~)}
- │ │ ├── ABSENT → DELETE_ONLY TemporaryIndex:{DescID: 104 (t), IndexID: 3, ConstraintID: 3, SourceIndexID: 1 (t_pkey-)}
- │ │ ├── ABSENT → PUBLIC IndexColumn:{DescID: 104 (t), ColumnID: 1 (i), IndexID: 3}
- │ │ ├── ABSENT → PUBLIC IndexColumn:{DescID: 104 (t), ColumnID: 2 (j-j_shadow~), IndexID: 3}
- │ │ ├── ABSENT → PUBLIC ColumnComputeExpression:{DescID: 104 (t), ColumnID: 3 (j+)}
- │ │ ├── ABSENT → PUBLIC IndexColumn:{DescID: 104 (t), ColumnID: 3 (j+), IndexID: 2 (t_pkey~)}
- │ │ ├── ABSENT → PUBLIC IndexColumn:{DescID: 104 (t), ColumnID: 3 (j+), IndexID: 3}
- │ │ └── ABSENT → PUBLIC ColumnName:{DescID: 104 (t), Name: "j_shadow", ColumnID: 2 (j-j_shadow~)}
- │ └── 21 Mutation operations
- │ ├── MakeAbsentColumnDeleteOnly {"Column":{"ColumnID":3,"PgAttributeNum":2,"TableID":104}}
- │ ├── SetColumnName {"ColumnID":3,"Name":"j","TableID":104}
- │ ├── UpsertColumnType {"ColumnType":{"ColumnID":3,"IsNullable":true,"TableID":104}}
- │ ├── AddColumnComputeExpression {"ComputeExpression":{"ColumnID":3,"TableID":104}}
- │ ├── SetColumnName {"ColumnID":2,"Name":"j_shadow","TableID":104}
- │ ├── MakeAbsentIndexBackfilling {"Index":{"ConstraintID":2,"IndexID":2,"IsUnique":true,"SourceIndexID":1,"TableID":104,"TemporaryIndexID":3}}
- │ ├── MaybeAddSplitForIndex {"IndexID":2,"TableID":104}
- │ ├── AddColumnToIndex {"ColumnID":1,"IndexID":2,"TableID":104}
- │ ├── AddColumnToIndex {"ColumnID":2,"IndexID":2,"Kind":2,"TableID":104}
- │ ├── MakeAbsentTempIndexDeleteOnly {"Index":{"ConstraintID":3,"IndexID":3,"IsUnique":true,"SourceIndexID":1,"TableID":104}}
- │ ├── MaybeAddSplitForIndex {"IndexID":3,"TableID":104}
- │ ├── AddColumnToIndex {"ColumnID":1,"IndexID":3,"TableID":104}
- │ ├── AddColumnToIndex {"ColumnID":2,"IndexID":3,"Kind":2,"TableID":104}
- │ ├── MakeAbsentIndexBackfilling {"Index":{"ConstraintID":4,"IndexID":4,"IsUnique":true,"SourceIndexID":2,"TableID":104,"TemporaryIndexID":5}}
- │ ├── MaybeAddSplitForIndex {"IndexID":4,"TableID":104}
- │ ├── AddColumnToIndex {"ColumnID":1,"IndexID":4,"TableID":104}
- │ ├── AddColumnToIndex {"ColumnID":3,"IndexID":2,"Kind":2,"Ordinal":1,"TableID":104}
- │ ├── AddColumnToIndex {"ColumnID":3,"IndexID":3,"Kind":2,"Ordinal":1,"TableID":104}
- │ ├── AddColumnToIndex {"ColumnID":3,"IndexID":4,"Kind":2,"TableID":104}
- │ ├── SetJobStateOnDescriptor {"DescriptorID":104,"Initialize":true}
- │ └── CreateSchemaChangerJob {"RunningStatus":"PostCommitPhase ..."}
- ├── PostCommitPhase
- │ ├── Stage 1 of 15 in PostCommitPhase
- │ │ ├── 1 element transitioning toward PUBLIC
- │ │ │ └── DELETE_ONLY → WRITE_ONLY Column:{DescID: 104 (t), ColumnID: 3 (j+)}
- │ │ ├── 2 elements transitioning toward TRANSIENT_ABSENT
- │ │ │ ├── DELETE_ONLY → WRITE_ONLY TemporaryIndex:{DescID: 104 (t), IndexID: 3, ConstraintID: 3, SourceIndexID: 1 (t_pkey-)}
- │ │ │ └── ABSENT → PUBLIC IndexData:{DescID: 104 (t), IndexID: 3}
- │ │ └── 4 Mutation operations
- │ │ ├── MakeDeleteOnlyColumnWriteOnly {"ColumnID":3,"TableID":104}
- │ │ ├── MakeDeleteOnlyIndexWriteOnly {"IndexID":3,"TableID":104}
- │ │ ├── SetJobStateOnDescriptor {"DescriptorID":104}
- │ │ └── UpdateSchemaChangerJob {"RunningStatus":"PostCommitPhase ..."}
- │ ├── Stage 2 of 15 in PostCommitPhase
- │ │ ├── 1 element transitioning toward TRANSIENT_ABSENT
- │ │ │ └── BACKFILL_ONLY → BACKFILLED PrimaryIndex:{DescID: 104 (t), IndexID: 2 (t_pkey~), ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1 (t_pkey-)}
- │ │ └── 1 Backfill operation
- │ │ └── BackfillIndex {"IndexID":2,"SourceIndexID":1,"TableID":104}
- │ ├── Stage 3 of 15 in PostCommitPhase
- │ │ ├── 1 element transitioning toward TRANSIENT_ABSENT
- │ │ │ └── BACKFILLED → DELETE_ONLY PrimaryIndex:{DescID: 104 (t), IndexID: 2 (t_pkey~), ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1 (t_pkey-)}
- │ │ └── 3 Mutation operations
- │ │ ├── MakeBackfillingIndexDeleteOnly {"IndexID":2,"TableID":104}
- │ │ ├── SetJobStateOnDescriptor {"DescriptorID":104}
- │ │ └── UpdateSchemaChangerJob {"RunningStatus":"PostCommitPhase ..."}
- │ ├── Stage 4 of 15 in PostCommitPhase
- │ │ ├── 1 element transitioning toward TRANSIENT_ABSENT
- │ │ │ └── DELETE_ONLY → MERGE_ONLY PrimaryIndex:{DescID: 104 (t), IndexID: 2 (t_pkey~), ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1 (t_pkey-)}
- │ │ └── 3 Mutation operations
- │ │ ├── MakeBackfilledIndexMerging {"IndexID":2,"TableID":104}
- │ │ ├── SetJobStateOnDescriptor {"DescriptorID":104}
- │ │ └── UpdateSchemaChangerJob {"RunningStatus":"PostCommitPhase ..."}
- │ ├── Stage 5 of 15 in PostCommitPhase
- │ │ ├── 1 element transitioning toward TRANSIENT_ABSENT
- │ │ │ └── MERGE_ONLY → MERGED PrimaryIndex:{DescID: 104 (t), IndexID: 2 (t_pkey~), ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1 (t_pkey-)}
- │ │ └── 1 Backfill operation
- │ │ └── MergeIndex {"BackfilledIndexID":2,"TableID":104,"TemporaryIndexID":3}
- │ ├── Stage 6 of 15 in PostCommitPhase
- │ │ ├── 2 elements transitioning toward TRANSIENT_ABSENT
- │ │ │ ├── MERGED → WRITE_ONLY PrimaryIndex:{DescID: 104 (t), IndexID: 2 (t_pkey~), ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1 (t_pkey-)}
- │ │ │ └── WRITE_ONLY → TRANSIENT_DELETE_ONLY TemporaryIndex:{DescID: 104 (t), IndexID: 3, ConstraintID: 3, SourceIndexID: 1 (t_pkey-)}
- │ │ └── 4 Mutation operations
- │ │ ├── MakeWriteOnlyIndexDeleteOnly {"IndexID":3,"TableID":104}
- │ │ ├── MakeMergedIndexWriteOnly {"IndexID":2,"TableID":104}
- │ │ ├── SetJobStateOnDescriptor {"DescriptorID":104}
- │ │ └── UpdateSchemaChangerJob {"RunningStatus":"PostCommitPhase ..."}
- │ ├── Stage 7 of 15 in PostCommitPhase
- │ │ ├── 1 element transitioning toward TRANSIENT_ABSENT
- │ │ │ └── WRITE_ONLY → VALIDATED PrimaryIndex:{DescID: 104 (t), IndexID: 2 (t_pkey~), ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1 (t_pkey-)}
- │ │ └── 1 Validation operation
- │ │ └── ValidateIndex {"IndexID":2,"TableID":104}
- │ ├── Stage 8 of 15 in PostCommitPhase
- │ │ ├── 5 elements transitioning toward TRANSIENT_ABSENT
- │ │ │ ├── VALIDATED → PUBLIC PrimaryIndex:{DescID: 104 (t), IndexID: 2 (t_pkey~), ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1 (t_pkey-)}
- │ │ │ ├── ABSENT → PUBLIC IndexName:{DescID: 104 (t), Name: "t_pkey", IndexID: 2 (t_pkey~)}
- │ │ │ ├── ABSENT → DELETE_ONLY TemporaryIndex:{DescID: 104 (t), IndexID: 5, ConstraintID: 5, SourceIndexID: 2 (t_pkey~)}
- │ │ │ ├── ABSENT → PUBLIC IndexColumn:{DescID: 104 (t), ColumnID: 1 (i), IndexID: 5}
- │ │ │ └── ABSENT → PUBLIC IndexColumn:{DescID: 104 (t), ColumnID: 3 (j+), IndexID: 5}
- │ │ ├── 2 elements transitioning toward ABSENT
- │ │ │ ├── PUBLIC → VALIDATED PrimaryIndex:{DescID: 104 (t), IndexID: 1 (t_pkey-), ConstraintID: 1}
- │ │ │ └── PUBLIC → ABSENT IndexName:{DescID: 104 (t), Name: "t_pkey", IndexID: 1 (t_pkey-)}
- │ │ └── 10 Mutation operations
- │ │ ├── MakePublicPrimaryIndexWriteOnly {"IndexID":1,"TableID":104}
- │ │ ├── SetIndexName {"IndexID":1,"Name":"crdb_internal_in...","TableID":104}
- │ │ ├── SetIndexName {"IndexID":2,"Name":"t_pkey","TableID":104}
- │ │ ├── MakeValidatedPrimaryIndexPublic {"IndexID":2,"TableID":104}
- │ │ ├── MakeAbsentTempIndexDeleteOnly {"Index":{"ConstraintID":5,"IndexID":5,"IsUnique":true,"SourceIndexID":2,"TableID":104}}
- │ │ ├── MaybeAddSplitForIndex {"IndexID":5,"TableID":104}
- │ │ ├── AddColumnToIndex {"ColumnID":1,"IndexID":5,"TableID":104}
- │ │ ├── AddColumnToIndex {"ColumnID":3,"IndexID":5,"Kind":2,"TableID":104}
- │ │ ├── SetJobStateOnDescriptor {"DescriptorID":104}
- │ │ └── UpdateSchemaChangerJob {"RunningStatus":"PostCommitPhase ..."}
- │ ├── Stage 9 of 15 in PostCommitPhase
- │ │ ├── 2 elements transitioning toward TRANSIENT_ABSENT
- │ │ │ ├── DELETE_ONLY → WRITE_ONLY TemporaryIndex:{DescID: 104 (t), IndexID: 5, ConstraintID: 5, SourceIndexID: 2 (t_pkey~)}
- │ │ │ └── ABSENT → PUBLIC IndexData:{DescID: 104 (t), IndexID: 5}
- │ │ └── 3 Mutation operations
- │ │ ├── MakeDeleteOnlyIndexWriteOnly {"IndexID":5,"TableID":104}
- │ │ ├── SetJobStateOnDescriptor {"DescriptorID":104}
- │ │ └── UpdateSchemaChangerJob {"RunningStatus":"PostCommitPhase ..."}
- │ ├── Stage 10 of 15 in PostCommitPhase
- │ │ ├── 1 element transitioning toward PUBLIC
- │ │ │ └── BACKFILL_ONLY → BACKFILLED PrimaryIndex:{DescID: 104 (t), IndexID: 4 (t_pkey+), ConstraintID: 4, TemporaryIndexID: 5, SourceIndexID: 2 (t_pkey~)}
- │ │ └── 1 Backfill operation
- │ │ └── BackfillIndex {"IndexID":4,"SourceIndexID":2,"TableID":104}
- │ ├── Stage 11 of 15 in PostCommitPhase
- │ │ ├── 1 element transitioning toward PUBLIC
- │ │ │ └── BACKFILLED → DELETE_ONLY PrimaryIndex:{DescID: 104 (t), IndexID: 4 (t_pkey+), ConstraintID: 4, TemporaryIndexID: 5, SourceIndexID: 2 (t_pkey~)}
- │ │ └── 3 Mutation operations
- │ │ ├── MakeBackfillingIndexDeleteOnly {"IndexID":4,"TableID":104}
- │ │ ├── SetJobStateOnDescriptor {"DescriptorID":104}
- │ │ └── UpdateSchemaChangerJob {"RunningStatus":"PostCommitPhase ..."}
- │ ├── Stage 12 of 15 in PostCommitPhase
- │ │ ├── 1 element transitioning toward PUBLIC
- │ │ │ └── DELETE_ONLY → MERGE_ONLY PrimaryIndex:{DescID: 104 (t), IndexID: 4 (t_pkey+), ConstraintID: 4, TemporaryIndexID: 5, SourceIndexID: 2 (t_pkey~)}
- │ │ └── 3 Mutation operations
- │ │ ├── MakeBackfilledIndexMerging {"IndexID":4,"TableID":104}
- │ │ ├── SetJobStateOnDescriptor {"DescriptorID":104}
- │ │ └── UpdateSchemaChangerJob {"RunningStatus":"PostCommitPhase ..."}
- │ ├── Stage 13 of 15 in PostCommitPhase
- │ │ ├── 1 element transitioning toward PUBLIC
- │ │ │ └── MERGE_ONLY → MERGED PrimaryIndex:{DescID: 104 (t), IndexID: 4 (t_pkey+), ConstraintID: 4, TemporaryIndexID: 5, SourceIndexID: 2 (t_pkey~)}
- │ │ └── 1 Backfill operation
- │ │ └── MergeIndex {"BackfilledIndexID":4,"TableID":104,"TemporaryIndexID":5}
- │ ├── Stage 14 of 15 in PostCommitPhase
- │ │ ├── 1 element transitioning toward PUBLIC
- │ │ │ └── MERGED → WRITE_ONLY PrimaryIndex:{DescID: 104 (t), IndexID: 4 (t_pkey+), ConstraintID: 4, TemporaryIndexID: 5, SourceIndexID: 2 (t_pkey~)}
- │ │ ├── 1 element transitioning toward TRANSIENT_ABSENT
- │ │ │ └── WRITE_ONLY → TRANSIENT_DELETE_ONLY TemporaryIndex:{DescID: 104 (t), IndexID: 5, ConstraintID: 5, SourceIndexID: 2 (t_pkey~)}
- │ │ └── 4 Mutation operations
- │ │ ├── MakeWriteOnlyIndexDeleteOnly {"IndexID":5,"TableID":104}
- │ │ ├── MakeMergedIndexWriteOnly {"IndexID":4,"TableID":104}
- │ │ ├── SetJobStateOnDescriptor {"DescriptorID":104}
- │ │ └── UpdateSchemaChangerJob {"RunningStatus":"PostCommitPhase ..."}
- │ └── Stage 15 of 15 in PostCommitPhase
- │ ├── 1 element transitioning toward PUBLIC
- │ │ └── WRITE_ONLY → VALIDATED PrimaryIndex:{DescID: 104 (t), IndexID: 4 (t_pkey+), ConstraintID: 4, TemporaryIndexID: 5, SourceIndexID: 2 (t_pkey~)}
- │ └── 1 Validation operation
- │ └── ValidateIndex {"IndexID":4,"TableID":104}
- └── PostCommitNonRevertiblePhase
- ├── Stage 1 of 4 in PostCommitNonRevertiblePhase
- │ ├── 9 elements transitioning toward TRANSIENT_ABSENT
- │ │ ├── TRANSIENT_DELETE_ONLY → TRANSIENT_ABSENT TemporaryIndex:{DescID: 104 (t), IndexID: 3, ConstraintID: 3, SourceIndexID: 1 (t_pkey-)}
- │ │ ├── PUBLIC → TRANSIENT_ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 1 (i), IndexID: 3}
- │ │ ├── PUBLIC → TRANSIENT_ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 2 (j-j_shadow~), IndexID: 3}
- │ │ ├── TRANSIENT_DELETE_ONLY → TRANSIENT_ABSENT TemporaryIndex:{DescID: 104 (t), IndexID: 5, ConstraintID: 5, SourceIndexID: 2 (t_pkey~)}
- │ │ ├── PUBLIC → TRANSIENT_ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 1 (i), IndexID: 5}
- │ │ ├── PUBLIC → TRANSIENT_ABSENT ColumnComputeExpression:{DescID: 104 (t), ColumnID: 3 (j+)}
- │ │ ├── PUBLIC → TRANSIENT_ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 3 (j+), IndexID: 3}
- │ │ ├── PUBLIC → TRANSIENT_ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 3 (j+), IndexID: 5}
- │ │ └── PUBLIC → TRANSIENT_ABSENT ColumnName:{DescID: 104 (t), Name: "j_shadow", ColumnID: 2 (j-j_shadow~)}
- │ ├── 5 elements transitioning toward ABSENT
- │ │ ├── PUBLIC → WRITE_ONLY Column:{DescID: 104 (t), ColumnID: 2 (j-j_shadow~)}
- │ │ ├── PUBLIC → ABSENT ColumnName:{DescID: 104 (t), Name: "j", ColumnID: 2 (j-j_shadow~)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 1 (i), IndexID: 1 (t_pkey-)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 2 (j-j_shadow~), IndexID: 1 (t_pkey-)}
- │ │ └── VALIDATED → DELETE_ONLY PrimaryIndex:{DescID: 104 (t), IndexID: 1 (t_pkey-), ConstraintID: 1}
- │ └── 16 Mutation operations
- │ ├── RemoveColumnFromIndex {"ColumnID":1,"IndexID":3,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":1,"IndexID":5,"TableID":104}
- │ ├── RemoveColumnComputeExpression {"ColumnID":3,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":3,"IndexID":3,"Kind":2,"Ordinal":1,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":3,"IndexID":5,"Kind":2,"TableID":104}
- │ ├── MakePublicColumnWriteOnly {"ColumnID":2,"TableID":104}
- │ ├── SetColumnName {"ColumnID":2,"Name":"crdb_internal_co...","TableID":104}
- │ ├── MakeWriteOnlyIndexDeleteOnly {"IndexID":1,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":2,"IndexID":3,"Kind":2,"TableID":104}
- │ ├── MakeIndexAbsent {"IndexID":5,"TableID":104}
- │ ├── SetColumnName {"ColumnID":2,"Name":"j","TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":1,"IndexID":1,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":2,"IndexID":1,"Kind":2,"TableID":104}
- │ ├── MakeIndexAbsent {"IndexID":3,"TableID":104}
- │ ├── SetJobStateOnDescriptor {"DescriptorID":104}
- │ └── UpdateSchemaChangerJob {"IsNonCancelable":true,"RunningStatus":"PostCommitNonRev..."}
- ├── Stage 2 of 4 in PostCommitNonRevertiblePhase
- │ ├── 3 elements transitioning toward PUBLIC
- │ │ ├── VALIDATED → PUBLIC PrimaryIndex:{DescID: 104 (t), IndexID: 4 (t_pkey+), ConstraintID: 4, TemporaryIndexID: 5, SourceIndexID: 2 (t_pkey~)}
- │ │ ├── ABSENT → PUBLIC IndexName:{DescID: 104 (t), Name: "t_pkey", IndexID: 4 (t_pkey+)}
- │ │ └── WRITE_ONLY → PUBLIC Column:{DescID: 104 (t), ColumnID: 3 (j+)}
- │ ├── 2 elements transitioning toward TRANSIENT_ABSENT
- │ │ ├── PUBLIC → TRANSIENT_VALIDATED PrimaryIndex:{DescID: 104 (t), IndexID: 2 (t_pkey~), ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1 (t_pkey-)}
- │ │ └── PUBLIC → TRANSIENT_ABSENT IndexName:{DescID: 104 (t), Name: "t_pkey", IndexID: 2 (t_pkey~)}
- │ ├── 2 elements transitioning toward ABSENT
- │ │ ├── WRITE_ONLY → DELETE_ONLY Column:{DescID: 104 (t), ColumnID: 2 (j-j_shadow~)}
- │ │ └── DELETE_ONLY → ABSENT PrimaryIndex:{DescID: 104 (t), IndexID: 1 (t_pkey-), ConstraintID: 1}
- │ └── 10 Mutation operations
- │ ├── MakeWriteOnlyColumnDeleteOnly {"ColumnID":2,"TableID":104}
- │ ├── MakeIndexAbsent {"IndexID":1,"TableID":104}
- │ ├── MakePublicPrimaryIndexWriteOnly {"IndexID":2,"TableID":104}
- │ ├── SetIndexName {"IndexID":2,"Name":"crdb_internal_in...","TableID":104}
- │ ├── SetIndexName {"IndexID":4,"Name":"t_pkey","TableID":104}
- │ ├── MakeValidatedPrimaryIndexPublic {"IndexID":4,"TableID":104}
- │ ├── MakeWriteOnlyColumnPublic {"ColumnID":3,"TableID":104}
- │ ├── RefreshStats {"TableID":104}
- │ ├── SetJobStateOnDescriptor {"DescriptorID":104}
- │ └── UpdateSchemaChangerJob {"IsNonCancelable":true,"RunningStatus":"PostCommitNonRev..."}
- ├── Stage 3 of 4 in PostCommitNonRevertiblePhase
- │ ├── 4 elements transitioning toward TRANSIENT_ABSENT
- │ │ ├── TRANSIENT_VALIDATED → TRANSIENT_DELETE_ONLY PrimaryIndex:{DescID: 104 (t), IndexID: 2 (t_pkey~), ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1 (t_pkey-)}
- │ │ ├── PUBLIC → TRANSIENT_ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 1 (i), IndexID: 2 (t_pkey~)}
- │ │ ├── PUBLIC → TRANSIENT_ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 2 (j-j_shadow~), IndexID: 2 (t_pkey~)}
- │ │ └── PUBLIC → TRANSIENT_ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 3 (j+), IndexID: 2 (t_pkey~)}
- │ └── 6 Mutation operations
- │ ├── MakeWriteOnlyIndexDeleteOnly {"IndexID":2,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":1,"IndexID":2,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":2,"IndexID":2,"Kind":2,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":3,"IndexID":2,"Kind":2,"Ordinal":1,"TableID":104}
- │ ├── SetJobStateOnDescriptor {"DescriptorID":104}
- │ └── UpdateSchemaChangerJob {"IsNonCancelable":true,"RunningStatus":"PostCommitNonRev..."}
- └── Stage 4 of 4 in PostCommitNonRevertiblePhase
- ├── 4 elements transitioning toward TRANSIENT_ABSENT
- │ ├── TRANSIENT_DELETE_ONLY → TRANSIENT_ABSENT PrimaryIndex:{DescID: 104 (t), IndexID: 2 (t_pkey~), ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1 (t_pkey-)}
- │ ├── PUBLIC → TRANSIENT_ABSENT IndexData:{DescID: 104 (t), IndexID: 2 (t_pkey~)}
- │ ├── PUBLIC → TRANSIENT_ABSENT IndexData:{DescID: 104 (t), IndexID: 3}
- │ └── PUBLIC → TRANSIENT_ABSENT IndexData:{DescID: 104 (t), IndexID: 5}
- ├── 3 elements transitioning toward ABSENT
- │ ├── DELETE_ONLY → ABSENT Column:{DescID: 104 (t), ColumnID: 2 (j-j_shadow~)}
- │ ├── PUBLIC → ABSENT ColumnType:{DescID: 104 (t), ColumnFamilyID: 0 (primary), ColumnID: 2 (j-j_shadow~), TypeName: "STRING"}
- │ └── PUBLIC → ABSENT IndexData:{DescID: 104 (t), IndexID: 1 (t_pkey-)}
- └── 8 Mutation operations
- ├── CreateGCJobForIndex {"IndexID":1,"TableID":104}
- ├── MakeIndexAbsent {"IndexID":2,"TableID":104}
- ├── CreateGCJobForIndex {"IndexID":2,"TableID":104}
- ├── CreateGCJobForIndex {"IndexID":3,"TableID":104}
- ├── CreateGCJobForIndex {"IndexID":5,"TableID":104}
- ├── MakeDeleteOnlyColumnAbsent {"ColumnID":2,"TableID":104}
- ├── RemoveJobStateFromDescriptor {"DescriptorID":104}
- └── UpdateSchemaChangerJob {"IsNonCancelable":true,"RunningStatus":"all stages compl..."}
diff --git a/pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_column_type_general/alter_table_alter_column_type_general.explain_shape b/pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_column_type_general/alter_table_alter_column_type_general.explain_shape
deleted file mode 100644
index d2d191ad8f14..000000000000
--- a/pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_column_type_general/alter_table_alter_column_type_general.explain_shape
+++ /dev/null
@@ -1,26 +0,0 @@
-/* setup */
-CREATE TABLE t (i INT PRIMARY KEY, j TEXT);
-SET enable_experimental_alter_column_type_general=TRUE;
-INSERT INTO t VALUES (1,NULL),(2,'1'),(3,'2');
-
-/* test */
-EXPLAIN (DDL, SHAPE) ALTER TABLE t ALTER COLUMN j SET DATA TYPE BIGINT USING j::BIGINT;
-----
-Schema change plan for ALTER TABLE ‹defaultdb›.‹public›.‹t› ALTER COLUMN ‹j› SET DATA TYPE INT8 USING ‹j›::INT8;
- ├── execute 2 system table mutations transactions
- ├── backfill using primary index t_pkey- in relation t
- │ └── into t_pkey~ (i; j-j_shadow~, j+)
- ├── execute 2 system table mutations transactions
- ├── merge temporary indexes into backfilled indexes in relation t
- │ └── from t@[3] into t_pkey~
- ├── execute 1 system table mutations transaction
- ├── validate UNIQUE constraint backed by index t_pkey~ in relation t
- ├── execute 2 system table mutations transactions
- ├── backfill using primary index t_pkey~ in relation t
- │ └── into t_pkey+ (i; j+)
- ├── execute 2 system table mutations transactions
- ├── merge temporary indexes into backfilled indexes in relation t
- │ └── from t@[5] into t_pkey+
- ├── execute 1 system table mutations transaction
- ├── validate UNIQUE constraint backed by index t_pkey+ in relation t
- └── execute 4 system table mutations transactions
diff --git a/pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_column_type_general/alter_table_alter_column_type_general.side_effects b/pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_column_type_general/alter_table_alter_column_type_general.side_effects
deleted file mode 100644
index b89a20d02dd5..000000000000
--- a/pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_column_type_general/alter_table_alter_column_type_general.side_effects
+++ /dev/null
@@ -1,1084 +0,0 @@
-/* setup */
-CREATE TABLE t (i INT PRIMARY KEY, j TEXT);
-SET enable_experimental_alter_column_type_general=TRUE;
-INSERT INTO t VALUES (1,NULL),(2,'1'),(3,'2');
-----
-...
-+object {100 101 t} -> 104
-
-/* test */
-ALTER TABLE t ALTER COLUMN j SET DATA TYPE BIGINT USING j::BIGINT;
-----
-begin transaction #1
-# begin StatementPhase
-checking for feature: ALTER TABLE
-increment telemetry for sql.schema.alter_table
-increment telemetry for sql.schema.alter_table.alter_column_type
-## StatementPhase stage 1 of 1 with 16 MutationType ops
-upsert descriptor #104
- ...
- width: 64
- - id: 2
- - name: j
- + name: j_shadow
- nullable: true
- type:
- ...
- - 1
- - 2
- + - 3
- columnNames:
- - i
- + - j_shadow
- - j
- defaultColumnId: 2
- ...
- id: 104
- modificationTime: {}
- + mutations:
- + - column:
- + computeExpr: j_shadow::INT8
- + id: 3
- + name: j
- + nullable: true
- + pgAttributeNum: 2
- + type:
- + family: IntFamily
- + oid: 20
- + width: 64
- + direction: ADD
- + mutationId: 1
- + state: DELETE_ONLY
- + - direction: ADD
- + index:
- + constraintId: 2
- + createdExplicitly: true
- + encodingType: 1
- + foreignKey: {}
- + geoConfig: {}
- + id: 2
- + interleave: {}
- + keyColumnDirections:
- + - ASC
- + keyColumnIds:
- + - 1
- + keyColumnNames:
- + - i
- + name: crdb_internal_index_2_name_placeholder
- + partitioning: {}
- + sharded: {}
- + storeColumnIds:
- + - 2
- + - 3
- + storeColumnNames:
- + - j_shadow
- + - j
- + unique: true
- + version: 4
- + mutationId: 1
- + state: BACKFILLING
- + - direction: ADD
- + index:
- + constraintId: 3
- + createdExplicitly: true
- + encodingType: 1
- + foreignKey: {}
- + geoConfig: {}
- + id: 3
- + interleave: {}
- + keyColumnDirections:
- + - ASC
- + keyColumnIds:
- + - 1
- + keyColumnNames:
- + - i
- + name: crdb_internal_index_3_name_placeholder
- + partitioning: {}
- + sharded: {}
- + storeColumnIds:
- + - 2
- + - 3
- + storeColumnNames:
- + - j_shadow
- + - j
- + unique: true
- + useDeletePreservingEncoding: true
- + version: 4
- + mutationId: 1
- + state: DELETE_ONLY
- + - direction: ADD
- + index:
- + constraintId: 4
- + createdExplicitly: true
- + encodingType: 1
- + foreignKey: {}
- + geoConfig: {}
- + id: 4
- + interleave: {}
- + keyColumnDirections:
- + - ASC
- + keyColumnIds:
- + - 1
- + keyColumnNames:
- + - i
- + name: crdb_internal_index_4_name_placeholder
- + partitioning: {}
- + sharded: {}
- + storeColumnIds:
- + - 3
- + storeColumnNames:
- + - j
- + unique: true
- + version: 4
- + mutationId: 1
- + state: BACKFILLING
- name: t
- - nextColumnId: 3
- - nextConstraintId: 2
- + nextColumnId: 4
- + nextConstraintId: 5
- nextFamilyId: 1
- - nextIndexId: 2
- + nextIndexId: 5
- nextMutationId: 1
- parentId: 100
- ...
- - 2
- storeColumnNames:
- - - j
- + - j_shadow
- unique: true
- version: 4
- ...
- time: {}
- unexposedParentSchemaId: 101
- - version: "1"
- + version: "2"
-# end StatementPhase
-# begin PreCommitPhase
-## PreCommitPhase stage 1 of 2 with 1 MutationType op
-undo all catalog changes within txn #1
-persist all catalog changes to storage
-## PreCommitPhase stage 2 of 2 with 21 MutationType ops
-upsert descriptor #104
- ...
- width: 64
- - id: 2
- - name: j
- + name: j_shadow
- nullable: true
- type:
- ...
- createAsOfTime:
- wallTime: "1640995200000000000"
- + declarativeSchemaChangerState:
- + authorization:
- + userName: root
- + currentStatuses:
- + jobId: "1"
- + nameMapping:
- + columns:
- + "1": i
- + "3": j
- + "4294967292": crdb_internal_origin_timestamp
- + "4294967293": crdb_internal_origin_id
- + "4294967294": tableoid
- + "4294967295": crdb_internal_mvcc_timestamp
- + families:
- + "0": primary
- + id: 104
- + indexes:
- + "4": t_pkey
- + name: t
- + relevantStatements:
- + - statement:
- + redactedStatement: ALTER TABLE ‹defaultdb›.‹public›.‹t› ALTER COLUMN ‹j› SET DATA TYPE INT8 USING ‹j›::INT8
- + statement: ALTER TABLE t ALTER COLUMN j SET DATA TYPE INT8 USING j::INT8
- + statementTag: ALTER TABLE
- + revertible: true
- + targetRanks:
- + targets:
- families:
- - columnIds:
- - 1
- - 2
- + - 3
- columnNames:
- - i
- + - j_shadow
- - j
- defaultColumnId: 2
- ...
- id: 104
- modificationTime: {}
- + mutations:
- + - column:
- + computeExpr: j_shadow::INT8
- + id: 3
- + name: j
- + nullable: true
- + pgAttributeNum: 2
- + type:
- + family: IntFamily
- + oid: 20
- + width: 64
- + direction: ADD
- + mutationId: 1
- + state: DELETE_ONLY
- + - direction: ADD
- + index:
- + constraintId: 2
- + createdExplicitly: true
- + encodingType: 1
- + foreignKey: {}
- + geoConfig: {}
- + id: 2
- + interleave: {}
- + keyColumnDirections:
- + - ASC
- + keyColumnIds:
- + - 1
- + keyColumnNames:
- + - i
- + name: crdb_internal_index_2_name_placeholder
- + partitioning: {}
- + sharded: {}
- + storeColumnIds:
- + - 2
- + - 3
- + storeColumnNames:
- + - j_shadow
- + - j
- + unique: true
- + version: 4
- + mutationId: 1
- + state: BACKFILLING
- + - direction: ADD
- + index:
- + constraintId: 3
- + createdExplicitly: true
- + encodingType: 1
- + foreignKey: {}
- + geoConfig: {}
- + id: 3
- + interleave: {}
- + keyColumnDirections:
- + - ASC
- + keyColumnIds:
- + - 1
- + keyColumnNames:
- + - i
- + name: crdb_internal_index_3_name_placeholder
- + partitioning: {}
- + sharded: {}
- + storeColumnIds:
- + - 2
- + - 3
- + storeColumnNames:
- + - j_shadow
- + - j
- + unique: true
- + useDeletePreservingEncoding: true
- + version: 4
- + mutationId: 1
- + state: DELETE_ONLY
- + - direction: ADD
- + index:
- + constraintId: 4
- + createdExplicitly: true
- + encodingType: 1
- + foreignKey: {}
- + geoConfig: {}
- + id: 4
- + interleave: {}
- + keyColumnDirections:
- + - ASC
- + keyColumnIds:
- + - 1
- + keyColumnNames:
- + - i
- + name: crdb_internal_index_4_name_placeholder
- + partitioning: {}
- + sharded: {}
- + storeColumnIds:
- + - 3
- + storeColumnNames:
- + - j
- + unique: true
- + version: 4
- + mutationId: 1
- + state: BACKFILLING
- name: t
- - nextColumnId: 3
- - nextConstraintId: 2
- + nextColumnId: 4
- + nextConstraintId: 5
- nextFamilyId: 1
- - nextIndexId: 2
- + nextIndexId: 5
- nextMutationId: 1
- parentId: 100
- ...
- - 2
- storeColumnNames:
- - - j
- + - j_shadow
- unique: true
- version: 4
- ...
- time: {}
- unexposedParentSchemaId: 101
- - version: "1"
- + version: "2"
-persist all catalog changes to storage
-create job #1 (non-cancelable: false): "ALTER TABLE defaultdb.public.t ALTER COLUMN j SET DATA TYPE INT8 USING j::INT8"
- descriptor IDs: [104]
-# end PreCommitPhase
-commit transaction #1
-notified job registry to adopt jobs: [1]
-# begin PostCommitPhase
-begin transaction #2
-commit transaction #2
-begin transaction #3
-## PostCommitPhase stage 1 of 15 with 4 MutationType ops
-upsert descriptor #104
- ...
- direction: ADD
- mutationId: 1
- - state: DELETE_ONLY
- + state: WRITE_ONLY
- - direction: ADD
- index:
- ...
- version: 4
- mutationId: 1
- - state: DELETE_ONLY
- + state: WRITE_ONLY
- - direction: ADD
- index:
- ...
- time: {}
- unexposedParentSchemaId: 101
- - version: "2"
- + version: "3"
-persist all catalog changes to storage
-update progress of schema change job #1: "PostCommitPhase stage 2 of 15 with 1 BackfillType op pending"
-commit transaction #3
-begin transaction #4
-## PostCommitPhase stage 2 of 15 with 1 BackfillType op
-backfill indexes [2] from index #1 in table #104
-commit transaction #4
-begin transaction #5
-## PostCommitPhase stage 3 of 15 with 3 MutationType ops
-upsert descriptor #104
- ...
- version: 4
- mutationId: 1
- - state: BACKFILLING
- + state: DELETE_ONLY
- - direction: ADD
- index:
- ...
- time: {}
- unexposedParentSchemaId: 101
- - version: "3"
- + version: "4"
-persist all catalog changes to storage
-update progress of schema change job #1: "PostCommitPhase stage 4 of 15 with 1 MutationType op pending"
-commit transaction #5
-begin transaction #6
-## PostCommitPhase stage 4 of 15 with 3 MutationType ops
-upsert descriptor #104
- ...
- version: 4
- mutationId: 1
- - state: DELETE_ONLY
- + state: MERGING
- - direction: ADD
- index:
- ...
- time: {}
- unexposedParentSchemaId: 101
- - version: "4"
- + version: "5"
-persist all catalog changes to storage
-update progress of schema change job #1: "PostCommitPhase stage 5 of 15 with 1 BackfillType op pending"
-commit transaction #6
-begin transaction #7
-## PostCommitPhase stage 5 of 15 with 1 BackfillType op
-merge temporary indexes [3] into backfilled indexes [2] in table #104
-commit transaction #7
-begin transaction #8
-## PostCommitPhase stage 6 of 15 with 4 MutationType ops
-upsert descriptor #104
- ...
- version: 4
- mutationId: 1
- - state: MERGING
- - - direction: ADD
- + state: WRITE_ONLY
- + - direction: DROP
- index:
- constraintId: 3
- ...
- version: 4
- mutationId: 1
- - state: WRITE_ONLY
- + state: DELETE_ONLY
- - direction: ADD
- index:
- ...
- time: {}
- unexposedParentSchemaId: 101
- - version: "5"
- + version: "6"
-persist all catalog changes to storage
-update progress of schema change job #1: "PostCommitPhase stage 7 of 15 with 1 ValidationType op pending"
-commit transaction #8
-begin transaction #9
-## PostCommitPhase stage 7 of 15 with 1 ValidationType op
-validate forward indexes [2] in table #104
-commit transaction #9
-begin transaction #10
-## PostCommitPhase stage 8 of 15 with 10 MutationType ops
-upsert descriptor #104
- ...
- mutationId: 1
- state: WRITE_ONLY
- - - direction: ADD
- + - direction: DROP
- index:
- - constraintId: 2
- + constraintId: 3
- createdExplicitly: true
- encodingType: 1
- foreignKey: {}
- geoConfig: {}
- - id: 2
- + id: 3
- interleave: {}
- keyColumnDirections:
- ...
- keyColumnNames:
- - i
- - name: crdb_internal_index_2_name_placeholder
- + name: crdb_internal_index_3_name_placeholder
- partitioning: {}
- sharded: {}
- ...
- - j
- unique: true
- + useDeletePreservingEncoding: true
- version: 4
- mutationId: 1
- - state: WRITE_ONLY
- - - direction: DROP
- + state: DELETE_ONLY
- + - direction: ADD
- index:
- - constraintId: 3
- + constraintId: 4
- createdExplicitly: true
- encodingType: 1
- foreignKey: {}
- geoConfig: {}
- - id: 3
- + id: 4
- interleave: {}
- keyColumnDirections:
- ...
- keyColumnNames:
- - i
- - name: crdb_internal_index_3_name_placeholder
- + name: crdb_internal_index_4_name_placeholder
- partitioning: {}
- sharded: {}
- storeColumnIds:
- - - 2
- - 3
- storeColumnNames:
- - - j_shadow
- - j
- unique: true
- - useDeletePreservingEncoding: true
- version: 4
- mutationId: 1
- - state: DELETE_ONLY
- + state: BACKFILLING
- + - direction: DROP
- + index:
- + constraintId: 1
- + createdAtNanos: "1640995200000000000"
- + encodingType: 1
- + foreignKey: {}
- + geoConfig: {}
- + id: 1
- + interleave: {}
- + keyColumnDirections:
- + - ASC
- + keyColumnIds:
- + - 1
- + keyColumnNames:
- + - i
- + name: crdb_internal_index_1_name_placeholder
- + partitioning: {}
- + sharded: {}
- + storeColumnIds:
- + - 2
- + storeColumnNames:
- + - j_shadow
- + unique: true
- + version: 4
- + mutationId: 1
- + state: WRITE_ONLY
- - direction: ADD
- index:
- - constraintId: 4
- + constraintId: 5
- createdExplicitly: true
- encodingType: 1
- foreignKey: {}
- geoConfig: {}
- - id: 4
- + id: 5
- interleave: {}
- keyColumnDirections:
- ...
- keyColumnNames:
- - i
- - name: crdb_internal_index_4_name_placeholder
- + name: crdb_internal_index_5_name_placeholder
- partitioning: {}
- sharded: {}
- ...
- - j
- unique: true
- + useDeletePreservingEncoding: true
- version: 4
- mutationId: 1
- - state: BACKFILLING
- + state: DELETE_ONLY
- name: t
- nextColumnId: 4
- - nextConstraintId: 5
- + nextConstraintId: 6
- nextFamilyId: 1
- - nextIndexId: 5
- + nextIndexId: 6
- nextMutationId: 1
- parentId: 100
- primaryIndex:
- - constraintId: 1
- - createdAtNanos: "1640995200000000000"
- + constraintId: 2
- + createdExplicitly: true
- encodingType: 1
- foreignKey: {}
- geoConfig: {}
- - id: 1
- + id: 2
- interleave: {}
- keyColumnDirections:
- ...
- storeColumnIds:
- - 2
- + - 3
- storeColumnNames:
- - j_shadow
- + - j
- unique: true
- version: 4
- ...
- time: {}
- unexposedParentSchemaId: 101
- - version: "6"
- + version: "7"
-persist all catalog changes to storage
-update progress of schema change job #1: "PostCommitPhase stage 9 of 15 with 1 MutationType op pending"
-commit transaction #10
-begin transaction #11
-## PostCommitPhase stage 9 of 15 with 3 MutationType ops
-upsert descriptor #104
- ...
- version: 4
- mutationId: 1
- - state: DELETE_ONLY
- + state: WRITE_ONLY
- name: t
- nextColumnId: 4
- ...
- time: {}
- unexposedParentSchemaId: 101
- - version: "7"
- + version: "8"
-persist all catalog changes to storage
-update progress of schema change job #1: "PostCommitPhase stage 10 of 15 with 1 BackfillType op pending"
-commit transaction #11
-begin transaction #12
-## PostCommitPhase stage 10 of 15 with 1 BackfillType op
-backfill indexes [4] from index #2 in table #104
-commit transaction #12
-begin transaction #13
-## PostCommitPhase stage 11 of 15 with 3 MutationType ops
-upsert descriptor #104
- ...
- version: 4
- mutationId: 1
- - state: BACKFILLING
- + state: DELETE_ONLY
- - direction: DROP
- index:
- ...
- time: {}
- unexposedParentSchemaId: 101
- - version: "8"
- + version: "9"
-persist all catalog changes to storage
-update progress of schema change job #1: "PostCommitPhase stage 12 of 15 with 1 MutationType op pending"
-commit transaction #13
-begin transaction #14
-## PostCommitPhase stage 12 of 15 with 3 MutationType ops
-upsert descriptor #104
- ...
- version: 4
- mutationId: 1
- - state: DELETE_ONLY
- + state: MERGING
- - direction: DROP
- index:
- ...
- time: {}
- unexposedParentSchemaId: 101
- - version: "9"
- + version: "10"
-persist all catalog changes to storage
-update progress of schema change job #1: "PostCommitPhase stage 13 of 15 with 1 BackfillType op pending"
-commit transaction #14
-begin transaction #15
-## PostCommitPhase stage 13 of 15 with 1 BackfillType op
-merge temporary indexes [5] into backfilled indexes [4] in table #104
-commit transaction #15
-begin transaction #16
-## PostCommitPhase stage 14 of 15 with 4 MutationType ops
-upsert descriptor #104
- ...
- version: 4
- mutationId: 1
- - state: MERGING
- + state: WRITE_ONLY
- - direction: DROP
- index:
- ...
- mutationId: 1
- state: WRITE_ONLY
- - - direction: ADD
- + - direction: DROP
- index:
- constraintId: 5
- ...
- version: 4
- mutationId: 1
- - state: WRITE_ONLY
- + state: DELETE_ONLY
- name: t
- nextColumnId: 4
- ...
- time: {}
- unexposedParentSchemaId: 101
- - version: "10"
- + version: "11"
-persist all catalog changes to storage
-update progress of schema change job #1: "PostCommitPhase stage 15 of 15 with 1 ValidationType op pending"
-commit transaction #16
-begin transaction #17
-## PostCommitPhase stage 15 of 15 with 1 ValidationType op
-validate forward indexes [4] in table #104
-commit transaction #17
-begin transaction #18
-## PostCommitNonRevertiblePhase stage 1 of 4 with 16 MutationType ops
-upsert descriptor #104
- ...
- oid: 20
- width: 64
- - - id: 2
- - name: j_shadow
- - nullable: true
- - type:
- - family: StringFamily
- - oid: 25
- createAsOfTime:
- wallTime: "1640995200000000000"
- ...
- statement: ALTER TABLE t ALTER COLUMN j SET DATA TYPE INT8 USING j::INT8
- statementTag: ALTER TABLE
- - revertible: true
- targetRanks:
- targets:
- ...
- columnNames:
- - i
- - - j_shadow
- - j
- + - j
- defaultColumnId: 2
- name: primary
- ...
- mutations:
- - column:
- - computeExpr: j_shadow::INT8
- id: 3
- name: j
- ...
- mutationId: 1
- state: WRITE_ONLY
- - - direction: DROP
- - index:
- - constraintId: 3
- - createdExplicitly: true
- - encodingType: 1
- - foreignKey: {}
- - geoConfig: {}
- - id: 3
- - interleave: {}
- - keyColumnDirections:
- - - ASC
- - keyColumnIds:
- - - 1
- - keyColumnNames:
- - - i
- - name: crdb_internal_index_3_name_placeholder
- - partitioning: {}
- - sharded: {}
- - storeColumnIds:
- - - 2
- - - 3
- - storeColumnNames:
- - - j_shadow
- - - j
- - unique: true
- - useDeletePreservingEncoding: true
- - version: 4
- - mutationId: 1
- - state: DELETE_ONLY
- - direction: ADD
- index:
- ...
- - 2
- storeColumnNames:
- - - j_shadow
- - unique: true
- - version: 4
- - mutationId: 1
- - state: WRITE_ONLY
- - - direction: DROP
- - index:
- - constraintId: 5
- - createdExplicitly: true
- - encodingType: 1
- - foreignKey: {}
- - geoConfig: {}
- - id: 5
- - interleave: {}
- - keyColumnDirections:
- - - ASC
- - keyColumnIds:
- - - 1
- - keyColumnNames:
- - - i
- - name: crdb_internal_index_5_name_placeholder
- - partitioning: {}
- - sharded: {}
- - storeColumnIds:
- - - 3
- - storeColumnNames:
- - j
- unique: true
- - useDeletePreservingEncoding: true
- version: 4
- mutationId: 1
- state: DELETE_ONLY
- + - column:
- + id: 2
- + name: j
- + nullable: true
- + type:
- + family: StringFamily
- + oid: 25
- + direction: DROP
- + mutationId: 1
- + state: WRITE_ONLY
- name: t
- nextColumnId: 4
- ...
- - 3
- storeColumnNames:
- - - j_shadow
- - j
- + - j
- unique: true
- version: 4
- ...
- time: {}
- unexposedParentSchemaId: 101
- - version: "11"
- + version: "12"
-persist all catalog changes to storage
-update progress of schema change job #1: "PostCommitNonRevertiblePhase stage 2 of 4 with 8 MutationType ops pending"
-set schema change job #1 to non-cancellable
-commit transaction #18
-begin transaction #19
-## PostCommitNonRevertiblePhase stage 2 of 4 with 10 MutationType ops
-upsert descriptor #104
- ...
- oid: 20
- width: 64
- + - id: 3
- + name: j
- + nullable: true
- + pgAttributeNum: 2
- + type:
- + family: IntFamily
- + oid: 20
- + width: 64
- createAsOfTime:
- wallTime: "1640995200000000000"
- ...
- mutations:
- - column:
- - id: 3
- + id: 2
- name: j
- nullable: true
- - pgAttributeNum: 2
- type:
- - family: IntFamily
- - oid: 20
- - width: 64
- - direction: ADD
- + family: StringFamily
- + oid: 25
- + direction: DROP
- mutationId: 1
- - state: WRITE_ONLY
- - - direction: ADD
- + state: DELETE_ONLY
- + - direction: DROP
- index:
- - constraintId: 4
- + constraintId: 2
- createdExplicitly: true
- encodingType: 1
- foreignKey: {}
- geoConfig: {}
- - id: 4
- + id: 2
- interleave: {}
- keyColumnDirections:
- ...
- keyColumnNames:
- - i
- - name: crdb_internal_index_4_name_placeholder
- + name: crdb_internal_index_2_name_placeholder
- partitioning: {}
- sharded: {}
- storeColumnIds:
- + - 2
- - 3
- storeColumnNames:
- - j
- - unique: true
- - version: 4
- - mutationId: 1
- - state: WRITE_ONLY
- - - direction: DROP
- - index:
- - constraintId: 1
- - createdAtNanos: "1640995200000000000"
- - encodingType: 1
- - foreignKey: {}
- - geoConfig: {}
- - id: 1
- - interleave: {}
- - keyColumnDirections:
- - - ASC
- - keyColumnIds:
- - - 1
- - keyColumnNames:
- - - i
- - name: crdb_internal_index_1_name_placeholder
- - partitioning: {}
- - sharded: {}
- - storeColumnIds:
- - - 2
- - storeColumnNames:
- - j
- unique: true
- version: 4
- mutationId: 1
- - state: DELETE_ONLY
- - - column:
- - id: 2
- - name: j
- - nullable: true
- - type:
- - family: StringFamily
- - oid: 25
- - direction: DROP
- - mutationId: 1
- state: WRITE_ONLY
- name: t
- ...
- parentId: 100
- primaryIndex:
- - constraintId: 2
- + constraintId: 4
- createdExplicitly: true
- encodingType: 1
- foreignKey: {}
- geoConfig: {}
- - id: 2
- + id: 4
- interleave: {}
- keyColumnDirections:
- ...
- sharded: {}
- storeColumnIds:
- - - 2
- - 3
- storeColumnNames:
- - j
- - - j
- unique: true
- version: 4
- ...
- time: {}
- unexposedParentSchemaId: 101
- - version: "12"
- + version: "13"
-persist all catalog changes to storage
-adding table for stats refresh: 104
-update progress of schema change job #1: "PostCommitNonRevertiblePhase stage 3 of 4 with 4 MutationType ops pending"
-commit transaction #19
-begin transaction #20
-## PostCommitNonRevertiblePhase stage 3 of 4 with 6 MutationType ops
-upsert descriptor #104
- ...
- version: 4
- mutationId: 1
- - state: WRITE_ONLY
- + state: DELETE_ONLY
- name: t
- nextColumnId: 4
- ...
- time: {}
- unexposedParentSchemaId: 101
- - version: "13"
- + version: "14"
-persist all catalog changes to storage
-update progress of schema change job #1: "PostCommitNonRevertiblePhase stage 4 of 4 with 6 MutationType ops pending"
-commit transaction #20
-begin transaction #21
-## PostCommitNonRevertiblePhase stage 4 of 4 with 8 MutationType ops
-upsert descriptor #104
- ...
- createAsOfTime:
- wallTime: "1640995200000000000"
- - declarativeSchemaChangerState:
- - authorization:
- - userName: root
- - currentStatuses:
- - jobId: "1"
- - nameMapping:
- - columns:
- - "1": i
- - "3": j
- - "4294967292": crdb_internal_origin_timestamp
- - "4294967293": crdb_internal_origin_id
- - "4294967294": tableoid
- - "4294967295": crdb_internal_mvcc_timestamp
- - families:
- - "0": primary
- - id: 104
- - indexes:
- - "4": t_pkey
- - name: t
- - relevantStatements:
- - - statement:
- - redactedStatement: ALTER TABLE ‹defaultdb›.‹public›.‹t› ALTER COLUMN ‹j› SET DATA TYPE INT8 USING ‹j›::INT8
- - statement: ALTER TABLE t ALTER COLUMN j SET DATA TYPE INT8 USING j::INT8
- - statementTag: ALTER TABLE
- - targetRanks:
- - targets:
- families:
- - columnIds:
- - 1
- - - 2
- - 3
- columnNames:
- - i
- - j
- - - j
- defaultColumnId: 2
- name: primary
- ...
- id: 104
- modificationTime: {}
- - mutations:
- - - column:
- - id: 2
- - name: j
- - nullable: true
- - type:
- - family: StringFamily
- - oid: 25
- - direction: DROP
- - mutationId: 1
- - state: DELETE_ONLY
- - - direction: DROP
- - index:
- - constraintId: 2
- - createdExplicitly: true
- - encodingType: 1
- - foreignKey: {}
- - geoConfig: {}
- - id: 2
- - interleave: {}
- - keyColumnDirections:
- - - ASC
- - keyColumnIds:
- - - 1
- - keyColumnNames:
- - - i
- - name: crdb_internal_index_2_name_placeholder
- - partitioning: {}
- - sharded: {}
- - storeColumnIds:
- - - 2
- - - 3
- - storeColumnNames:
- - - j
- - - j
- - unique: true
- - version: 4
- - mutationId: 1
- - state: DELETE_ONLY
- + mutations: []
- name: t
- nextColumnId: 4
- ...
- time: {}
- unexposedParentSchemaId: 101
- - version: "14"
- + version: "15"
-persist all catalog changes to storage
-create job #2 (non-cancelable: true): "GC for ALTER TABLE defaultdb.public.t ALTER COLUMN j SET DATA TYPE INT8 USING j::INT8"
- descriptor IDs: [104]
-update progress of schema change job #1: "all stages completed"
-set schema change job #1 to non-cancellable
-updated schema change job #1 descriptor IDs to []
-write *eventpb.FinishSchemaChange to event log:
- sc:
- descriptorId: 104
-commit transaction #21
-notified job registry to adopt jobs: [2]
-# end PostCommitPhase
diff --git a/pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_column_type_general/alter_table_alter_column_type_general__rollback_10_of_15.explain b/pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_column_type_general/alter_table_alter_column_type_general__rollback_10_of_15.explain
deleted file mode 100644
index 02d36597e3d2..000000000000
--- a/pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_column_type_general/alter_table_alter_column_type_general__rollback_10_of_15.explain
+++ /dev/null
@@ -1,86 +0,0 @@
-/* setup */
-CREATE TABLE t (i INT PRIMARY KEY, j TEXT);
-SET enable_experimental_alter_column_type_general=TRUE;
-INSERT INTO t VALUES (1,NULL),(2,'1'),(3,'2');
-
-/* test */
-ALTER TABLE t ALTER COLUMN j SET DATA TYPE BIGINT USING j::BIGINT;
-EXPLAIN (DDL) rollback at post-commit stage 10 of 15;
-----
-Schema change plan for rolling back ALTER TABLE ‹defaultdb›.public.‹t› ALTER COLUMN ‹j› SET DATA TYPE INT8 USING ‹j›::INT8;
- └── PostCommitNonRevertiblePhase
- ├── Stage 1 of 3 in PostCommitNonRevertiblePhase
- │ ├── 2 elements transitioning toward PUBLIC
- │ │ ├── VALIDATED → PUBLIC PrimaryIndex:{DescID: 104 (t), IndexID: 1 (t_pkey+), ConstraintID: 1}
- │ │ └── ABSENT → PUBLIC IndexName:{DescID: 104 (t), Name: "t_pkey", IndexID: 1 (t_pkey+)}
- │ ├── 15 elements transitioning toward ABSENT
- │ │ ├── PUBLIC → VALIDATED PrimaryIndex:{DescID: 104 (t), IndexID: 2 (t_pkey-), ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1 (t_pkey+)}
- │ │ ├── PUBLIC → ABSENT IndexName:{DescID: 104 (t), Name: "t_pkey", IndexID: 2 (t_pkey-)}
- │ │ ├── TRANSIENT_DELETE_ONLY → ABSENT TemporaryIndex:{DescID: 104 (t), IndexID: 3, ConstraintID: 3, SourceIndexID: 1 (t_pkey+)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 1 (i), IndexID: 3}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 2 (j_shadow-j+), IndexID: 3}
- │ │ ├── BACKFILL_ONLY → ABSENT PrimaryIndex:{DescID: 104 (t), IndexID: 4 (t_pkey-), ConstraintID: 4, TemporaryIndexID: 5, SourceIndexID: 2 (t_pkey-)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 1 (i), IndexID: 4 (t_pkey-)}
- │ │ ├── WRITE_ONLY → DELETE_ONLY TemporaryIndex:{DescID: 104 (t), IndexID: 5, ConstraintID: 5, SourceIndexID: 2 (t_pkey-)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 1 (i), IndexID: 5}
- │ │ ├── WRITE_ONLY → DELETE_ONLY Column:{DescID: 104 (t), ColumnID: 3 (j-)}
- │ │ ├── PUBLIC → ABSENT ColumnName:{DescID: 104 (t), Name: "j", ColumnID: 3 (j-)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 3 (j-), IndexID: 3}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 3 (j-), IndexID: 4 (t_pkey-)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 3 (j-), IndexID: 5}
- │ │ └── PUBLIC → ABSENT ColumnName:{DescID: 104 (t), Name: "j_shadow", ColumnID: 2 (j_shadow-j+)}
- │ └── 19 Mutation operations
- │ ├── SetIndexName {"IndexID":1,"Name":"t_pkey","TableID":104}
- │ ├── MakePublicPrimaryIndexWriteOnly {"IndexID":2,"TableID":104}
- │ ├── SetIndexName {"IndexID":2,"Name":"crdb_internal_in...","TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":1,"IndexID":3,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":2,"IndexID":3,"Kind":2,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":1,"IndexID":4,"TableID":104}
- │ ├── MakeWriteOnlyIndexDeleteOnly {"IndexID":5,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":1,"IndexID":5,"TableID":104}
- │ ├── MakeWriteOnlyColumnDeleteOnly {"ColumnID":3,"TableID":104}
- │ ├── SetColumnName {"ColumnID":3,"Name":"crdb_internal_co...","TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":3,"IndexID":3,"Kind":2,"Ordinal":1,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":3,"IndexID":4,"Kind":2,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":3,"IndexID":5,"Kind":2,"TableID":104}
- │ ├── SetColumnName {"ColumnID":2,"Name":"j","TableID":104}
- │ ├── MakeValidatedPrimaryIndexPublic {"IndexID":1,"TableID":104}
- │ ├── MakeIndexAbsent {"IndexID":3,"TableID":104}
- │ ├── MakeIndexAbsent {"IndexID":4,"TableID":104}
- │ ├── SetJobStateOnDescriptor {"DescriptorID":104}
- │ └── UpdateSchemaChangerJob {"IsNonCancelable":true,"RunningStatus":"PostCommitNonRev..."}
- ├── Stage 2 of 3 in PostCommitNonRevertiblePhase
- │ ├── 5 elements transitioning toward ABSENT
- │ │ ├── VALIDATED → DELETE_ONLY PrimaryIndex:{DescID: 104 (t), IndexID: 2 (t_pkey-), ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1 (t_pkey+)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 1 (i), IndexID: 2 (t_pkey-)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 2 (j_shadow-j+), IndexID: 2 (t_pkey-)}
- │ │ ├── DELETE_ONLY → ABSENT TemporaryIndex:{DescID: 104 (t), IndexID: 5, ConstraintID: 5, SourceIndexID: 2 (t_pkey-)}
- │ │ └── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 3 (j-), IndexID: 2 (t_pkey-)}
- │ └── 7 Mutation operations
- │ ├── MakeIndexAbsent {"IndexID":5,"TableID":104}
- │ ├── MakeWriteOnlyIndexDeleteOnly {"IndexID":2,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":1,"IndexID":2,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":2,"IndexID":2,"Kind":2,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":3,"IndexID":2,"Kind":2,"Ordinal":1,"TableID":104}
- │ ├── SetJobStateOnDescriptor {"DescriptorID":104}
- │ └── UpdateSchemaChangerJob {"IsNonCancelable":true,"RunningStatus":"PostCommitNonRev..."}
- └── Stage 3 of 3 in PostCommitNonRevertiblePhase
- ├── 8 elements transitioning toward ABSENT
- │ ├── DELETE_ONLY → ABSENT PrimaryIndex:{DescID: 104 (t), IndexID: 2 (t_pkey-), ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1 (t_pkey+)}
- │ ├── PUBLIC → ABSENT IndexData:{DescID: 104 (t), IndexID: 2 (t_pkey-)}
- │ ├── PUBLIC → ABSENT IndexData:{DescID: 104 (t), IndexID: 3}
- │ ├── PUBLIC → ABSENT IndexData:{DescID: 104 (t), IndexID: 4 (t_pkey-)}
- │ ├── PUBLIC → ABSENT IndexData:{DescID: 104 (t), IndexID: 5}
- │ ├── DELETE_ONLY → ABSENT Column:{DescID: 104 (t), ColumnID: 3 (j-)}
- │ ├── PUBLIC → ABSENT ColumnType:{DescID: 104 (t), ColumnFamilyID: 0 (primary), ColumnID: 3 (j-), TypeName: "INT8"}
- │ └── PUBLIC → ABSENT ColumnComputeExpression:{DescID: 104 (t), ColumnID: 3 (j-)}
- └── 9 Mutation operations
- ├── MakeIndexAbsent {"IndexID":2,"TableID":104}
- ├── CreateGCJobForIndex {"IndexID":2,"TableID":104}
- ├── CreateGCJobForIndex {"IndexID":3,"TableID":104}
- ├── CreateGCJobForIndex {"IndexID":4,"TableID":104}
- ├── CreateGCJobForIndex {"IndexID":5,"TableID":104}
- ├── RemoveColumnComputeExpression {"ColumnID":3,"TableID":104}
- ├── MakeDeleteOnlyColumnAbsent {"ColumnID":3,"TableID":104}
- ├── RemoveJobStateFromDescriptor {"DescriptorID":104}
- └── UpdateSchemaChangerJob {"IsNonCancelable":true,"RunningStatus":"all stages compl..."}
diff --git a/pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_column_type_general/alter_table_alter_column_type_general__rollback_11_of_15.explain b/pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_column_type_general/alter_table_alter_column_type_general__rollback_11_of_15.explain
deleted file mode 100644
index 2459b746fa60..000000000000
--- a/pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_column_type_general/alter_table_alter_column_type_general__rollback_11_of_15.explain
+++ /dev/null
@@ -1,86 +0,0 @@
-/* setup */
-CREATE TABLE t (i INT PRIMARY KEY, j TEXT);
-SET enable_experimental_alter_column_type_general=TRUE;
-INSERT INTO t VALUES (1,NULL),(2,'1'),(3,'2');
-
-/* test */
-ALTER TABLE t ALTER COLUMN j SET DATA TYPE BIGINT USING j::BIGINT;
-EXPLAIN (DDL) rollback at post-commit stage 11 of 15;
-----
-Schema change plan for rolling back ALTER TABLE ‹defaultdb›.public.‹t› ALTER COLUMN ‹j› SET DATA TYPE INT8 USING ‹j›::INT8;
- └── PostCommitNonRevertiblePhase
- ├── Stage 1 of 3 in PostCommitNonRevertiblePhase
- │ ├── 2 elements transitioning toward PUBLIC
- │ │ ├── VALIDATED → PUBLIC PrimaryIndex:{DescID: 104 (t), IndexID: 1 (t_pkey+), ConstraintID: 1}
- │ │ └── ABSENT → PUBLIC IndexName:{DescID: 104 (t), Name: "t_pkey", IndexID: 1 (t_pkey+)}
- │ ├── 15 elements transitioning toward ABSENT
- │ │ ├── PUBLIC → VALIDATED PrimaryIndex:{DescID: 104 (t), IndexID: 2 (t_pkey-), ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1 (t_pkey+)}
- │ │ ├── PUBLIC → ABSENT IndexName:{DescID: 104 (t), Name: "t_pkey", IndexID: 2 (t_pkey-)}
- │ │ ├── TRANSIENT_DELETE_ONLY → ABSENT TemporaryIndex:{DescID: 104 (t), IndexID: 3, ConstraintID: 3, SourceIndexID: 1 (t_pkey+)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 1 (i), IndexID: 3}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 2 (j_shadow-j+), IndexID: 3}
- │ │ ├── BACKFILL_ONLY → ABSENT PrimaryIndex:{DescID: 104 (t), IndexID: 4 (t_pkey-), ConstraintID: 4, TemporaryIndexID: 5, SourceIndexID: 2 (t_pkey-)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 1 (i), IndexID: 4 (t_pkey-)}
- │ │ ├── WRITE_ONLY → DELETE_ONLY TemporaryIndex:{DescID: 104 (t), IndexID: 5, ConstraintID: 5, SourceIndexID: 2 (t_pkey-)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 1 (i), IndexID: 5}
- │ │ ├── WRITE_ONLY → DELETE_ONLY Column:{DescID: 104 (t), ColumnID: 3 (j-)}
- │ │ ├── PUBLIC → ABSENT ColumnName:{DescID: 104 (t), Name: "j", ColumnID: 3 (j-)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 3 (j-), IndexID: 3}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 3 (j-), IndexID: 4 (t_pkey-)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 3 (j-), IndexID: 5}
- │ │ └── PUBLIC → ABSENT ColumnName:{DescID: 104 (t), Name: "j_shadow", ColumnID: 2 (j_shadow-j+)}
- │ └── 19 Mutation operations
- │ ├── SetIndexName {"IndexID":1,"Name":"t_pkey","TableID":104}
- │ ├── MakePublicPrimaryIndexWriteOnly {"IndexID":2,"TableID":104}
- │ ├── SetIndexName {"IndexID":2,"Name":"crdb_internal_in...","TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":1,"IndexID":3,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":2,"IndexID":3,"Kind":2,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":1,"IndexID":4,"TableID":104}
- │ ├── MakeWriteOnlyIndexDeleteOnly {"IndexID":5,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":1,"IndexID":5,"TableID":104}
- │ ├── MakeWriteOnlyColumnDeleteOnly {"ColumnID":3,"TableID":104}
- │ ├── SetColumnName {"ColumnID":3,"Name":"crdb_internal_co...","TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":3,"IndexID":3,"Kind":2,"Ordinal":1,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":3,"IndexID":4,"Kind":2,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":3,"IndexID":5,"Kind":2,"TableID":104}
- │ ├── SetColumnName {"ColumnID":2,"Name":"j","TableID":104}
- │ ├── MakeValidatedPrimaryIndexPublic {"IndexID":1,"TableID":104}
- │ ├── MakeIndexAbsent {"IndexID":3,"TableID":104}
- │ ├── MakeIndexAbsent {"IndexID":4,"TableID":104}
- │ ├── SetJobStateOnDescriptor {"DescriptorID":104}
- │ └── UpdateSchemaChangerJob {"IsNonCancelable":true,"RunningStatus":"PostCommitNonRev..."}
- ├── Stage 2 of 3 in PostCommitNonRevertiblePhase
- │ ├── 5 elements transitioning toward ABSENT
- │ │ ├── VALIDATED → DELETE_ONLY PrimaryIndex:{DescID: 104 (t), IndexID: 2 (t_pkey-), ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1 (t_pkey+)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 1 (i), IndexID: 2 (t_pkey-)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 2 (j_shadow-j+), IndexID: 2 (t_pkey-)}
- │ │ ├── DELETE_ONLY → ABSENT TemporaryIndex:{DescID: 104 (t), IndexID: 5, ConstraintID: 5, SourceIndexID: 2 (t_pkey-)}
- │ │ └── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 3 (j-), IndexID: 2 (t_pkey-)}
- │ └── 7 Mutation operations
- │ ├── MakeIndexAbsent {"IndexID":5,"TableID":104}
- │ ├── MakeWriteOnlyIndexDeleteOnly {"IndexID":2,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":1,"IndexID":2,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":2,"IndexID":2,"Kind":2,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":3,"IndexID":2,"Kind":2,"Ordinal":1,"TableID":104}
- │ ├── SetJobStateOnDescriptor {"DescriptorID":104}
- │ └── UpdateSchemaChangerJob {"IsNonCancelable":true,"RunningStatus":"PostCommitNonRev..."}
- └── Stage 3 of 3 in PostCommitNonRevertiblePhase
- ├── 8 elements transitioning toward ABSENT
- │ ├── DELETE_ONLY → ABSENT PrimaryIndex:{DescID: 104 (t), IndexID: 2 (t_pkey-), ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1 (t_pkey+)}
- │ ├── PUBLIC → ABSENT IndexData:{DescID: 104 (t), IndexID: 2 (t_pkey-)}
- │ ├── PUBLIC → ABSENT IndexData:{DescID: 104 (t), IndexID: 3}
- │ ├── PUBLIC → ABSENT IndexData:{DescID: 104 (t), IndexID: 4 (t_pkey-)}
- │ ├── PUBLIC → ABSENT IndexData:{DescID: 104 (t), IndexID: 5}
- │ ├── DELETE_ONLY → ABSENT Column:{DescID: 104 (t), ColumnID: 3 (j-)}
- │ ├── PUBLIC → ABSENT ColumnType:{DescID: 104 (t), ColumnFamilyID: 0 (primary), ColumnID: 3 (j-), TypeName: "INT8"}
- │ └── PUBLIC → ABSENT ColumnComputeExpression:{DescID: 104 (t), ColumnID: 3 (j-)}
- └── 9 Mutation operations
- ├── MakeIndexAbsent {"IndexID":2,"TableID":104}
- ├── CreateGCJobForIndex {"IndexID":2,"TableID":104}
- ├── CreateGCJobForIndex {"IndexID":3,"TableID":104}
- ├── CreateGCJobForIndex {"IndexID":4,"TableID":104}
- ├── CreateGCJobForIndex {"IndexID":5,"TableID":104}
- ├── RemoveColumnComputeExpression {"ColumnID":3,"TableID":104}
- ├── MakeDeleteOnlyColumnAbsent {"ColumnID":3,"TableID":104}
- ├── RemoveJobStateFromDescriptor {"DescriptorID":104}
- └── UpdateSchemaChangerJob {"IsNonCancelable":true,"RunningStatus":"all stages compl..."}
diff --git a/pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_column_type_general/alter_table_alter_column_type_general__rollback_12_of_15.explain b/pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_column_type_general/alter_table_alter_column_type_general__rollback_12_of_15.explain
deleted file mode 100644
index c6ba1e7046e0..000000000000
--- a/pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_column_type_general/alter_table_alter_column_type_general__rollback_12_of_15.explain
+++ /dev/null
@@ -1,86 +0,0 @@
-/* setup */
-CREATE TABLE t (i INT PRIMARY KEY, j TEXT);
-SET enable_experimental_alter_column_type_general=TRUE;
-INSERT INTO t VALUES (1,NULL),(2,'1'),(3,'2');
-
-/* test */
-ALTER TABLE t ALTER COLUMN j SET DATA TYPE BIGINT USING j::BIGINT;
-EXPLAIN (DDL) rollback at post-commit stage 12 of 15;
-----
-Schema change plan for rolling back ALTER TABLE ‹defaultdb›.public.‹t› ALTER COLUMN ‹j› SET DATA TYPE INT8 USING ‹j›::INT8;
- └── PostCommitNonRevertiblePhase
- ├── Stage 1 of 3 in PostCommitNonRevertiblePhase
- │ ├── 2 elements transitioning toward PUBLIC
- │ │ ├── VALIDATED → PUBLIC PrimaryIndex:{DescID: 104 (t), IndexID: 1 (t_pkey+), ConstraintID: 1}
- │ │ └── ABSENT → PUBLIC IndexName:{DescID: 104 (t), Name: "t_pkey", IndexID: 1 (t_pkey+)}
- │ ├── 15 elements transitioning toward ABSENT
- │ │ ├── PUBLIC → VALIDATED PrimaryIndex:{DescID: 104 (t), IndexID: 2 (t_pkey-), ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1 (t_pkey+)}
- │ │ ├── PUBLIC → ABSENT IndexName:{DescID: 104 (t), Name: "t_pkey", IndexID: 2 (t_pkey-)}
- │ │ ├── TRANSIENT_DELETE_ONLY → ABSENT TemporaryIndex:{DescID: 104 (t), IndexID: 3, ConstraintID: 3, SourceIndexID: 1 (t_pkey+)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 1 (i), IndexID: 3}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 2 (j_shadow-j+), IndexID: 3}
- │ │ ├── DELETE_ONLY → ABSENT PrimaryIndex:{DescID: 104 (t), IndexID: 4 (t_pkey-), ConstraintID: 4, TemporaryIndexID: 5, SourceIndexID: 2 (t_pkey-)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 1 (i), IndexID: 4 (t_pkey-)}
- │ │ ├── WRITE_ONLY → DELETE_ONLY TemporaryIndex:{DescID: 104 (t), IndexID: 5, ConstraintID: 5, SourceIndexID: 2 (t_pkey-)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 1 (i), IndexID: 5}
- │ │ ├── WRITE_ONLY → DELETE_ONLY Column:{DescID: 104 (t), ColumnID: 3 (j-)}
- │ │ ├── PUBLIC → ABSENT ColumnName:{DescID: 104 (t), Name: "j", ColumnID: 3 (j-)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 3 (j-), IndexID: 3}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 3 (j-), IndexID: 4 (t_pkey-)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 3 (j-), IndexID: 5}
- │ │ └── PUBLIC → ABSENT ColumnName:{DescID: 104 (t), Name: "j_shadow", ColumnID: 2 (j_shadow-j+)}
- │ └── 19 Mutation operations
- │ ├── SetIndexName {"IndexID":1,"Name":"t_pkey","TableID":104}
- │ ├── MakePublicPrimaryIndexWriteOnly {"IndexID":2,"TableID":104}
- │ ├── SetIndexName {"IndexID":2,"Name":"crdb_internal_in...","TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":1,"IndexID":3,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":2,"IndexID":3,"Kind":2,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":1,"IndexID":4,"TableID":104}
- │ ├── MakeWriteOnlyIndexDeleteOnly {"IndexID":5,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":1,"IndexID":5,"TableID":104}
- │ ├── MakeWriteOnlyColumnDeleteOnly {"ColumnID":3,"TableID":104}
- │ ├── SetColumnName {"ColumnID":3,"Name":"crdb_internal_co...","TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":3,"IndexID":3,"Kind":2,"Ordinal":1,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":3,"IndexID":4,"Kind":2,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":3,"IndexID":5,"Kind":2,"TableID":104}
- │ ├── SetColumnName {"ColumnID":2,"Name":"j","TableID":104}
- │ ├── MakeValidatedPrimaryIndexPublic {"IndexID":1,"TableID":104}
- │ ├── MakeIndexAbsent {"IndexID":3,"TableID":104}
- │ ├── MakeIndexAbsent {"IndexID":4,"TableID":104}
- │ ├── SetJobStateOnDescriptor {"DescriptorID":104}
- │ └── UpdateSchemaChangerJob {"IsNonCancelable":true,"RunningStatus":"PostCommitNonRev..."}
- ├── Stage 2 of 3 in PostCommitNonRevertiblePhase
- │ ├── 5 elements transitioning toward ABSENT
- │ │ ├── VALIDATED → DELETE_ONLY PrimaryIndex:{DescID: 104 (t), IndexID: 2 (t_pkey-), ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1 (t_pkey+)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 1 (i), IndexID: 2 (t_pkey-)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 2 (j_shadow-j+), IndexID: 2 (t_pkey-)}
- │ │ ├── DELETE_ONLY → ABSENT TemporaryIndex:{DescID: 104 (t), IndexID: 5, ConstraintID: 5, SourceIndexID: 2 (t_pkey-)}
- │ │ └── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 3 (j-), IndexID: 2 (t_pkey-)}
- │ └── 7 Mutation operations
- │ ├── MakeIndexAbsent {"IndexID":5,"TableID":104}
- │ ├── MakeWriteOnlyIndexDeleteOnly {"IndexID":2,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":1,"IndexID":2,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":2,"IndexID":2,"Kind":2,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":3,"IndexID":2,"Kind":2,"Ordinal":1,"TableID":104}
- │ ├── SetJobStateOnDescriptor {"DescriptorID":104}
- │ └── UpdateSchemaChangerJob {"IsNonCancelable":true,"RunningStatus":"PostCommitNonRev..."}
- └── Stage 3 of 3 in PostCommitNonRevertiblePhase
- ├── 8 elements transitioning toward ABSENT
- │ ├── DELETE_ONLY → ABSENT PrimaryIndex:{DescID: 104 (t), IndexID: 2 (t_pkey-), ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1 (t_pkey+)}
- │ ├── PUBLIC → ABSENT IndexData:{DescID: 104 (t), IndexID: 2 (t_pkey-)}
- │ ├── PUBLIC → ABSENT IndexData:{DescID: 104 (t), IndexID: 3}
- │ ├── PUBLIC → ABSENT IndexData:{DescID: 104 (t), IndexID: 4 (t_pkey-)}
- │ ├── PUBLIC → ABSENT IndexData:{DescID: 104 (t), IndexID: 5}
- │ ├── DELETE_ONLY → ABSENT Column:{DescID: 104 (t), ColumnID: 3 (j-)}
- │ ├── PUBLIC → ABSENT ColumnType:{DescID: 104 (t), ColumnFamilyID: 0 (primary), ColumnID: 3 (j-), TypeName: "INT8"}
- │ └── PUBLIC → ABSENT ColumnComputeExpression:{DescID: 104 (t), ColumnID: 3 (j-)}
- └── 9 Mutation operations
- ├── MakeIndexAbsent {"IndexID":2,"TableID":104}
- ├── CreateGCJobForIndex {"IndexID":2,"TableID":104}
- ├── CreateGCJobForIndex {"IndexID":3,"TableID":104}
- ├── CreateGCJobForIndex {"IndexID":4,"TableID":104}
- ├── CreateGCJobForIndex {"IndexID":5,"TableID":104}
- ├── RemoveColumnComputeExpression {"ColumnID":3,"TableID":104}
- ├── MakeDeleteOnlyColumnAbsent {"ColumnID":3,"TableID":104}
- ├── RemoveJobStateFromDescriptor {"DescriptorID":104}
- └── UpdateSchemaChangerJob {"IsNonCancelable":true,"RunningStatus":"all stages compl..."}
diff --git a/pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_column_type_general/alter_table_alter_column_type_general__rollback_13_of_15.explain b/pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_column_type_general/alter_table_alter_column_type_general__rollback_13_of_15.explain
deleted file mode 100644
index e93b7f567bf9..000000000000
--- a/pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_column_type_general/alter_table_alter_column_type_general__rollback_13_of_15.explain
+++ /dev/null
@@ -1,88 +0,0 @@
-/* setup */
-CREATE TABLE t (i INT PRIMARY KEY, j TEXT);
-SET enable_experimental_alter_column_type_general=TRUE;
-INSERT INTO t VALUES (1,NULL),(2,'1'),(3,'2');
-
-/* test */
-ALTER TABLE t ALTER COLUMN j SET DATA TYPE BIGINT USING j::BIGINT;
-EXPLAIN (DDL) rollback at post-commit stage 13 of 15;
-----
-Schema change plan for rolling back ALTER TABLE ‹defaultdb›.public.‹t› ALTER COLUMN ‹j› SET DATA TYPE INT8 USING ‹j›::INT8;
- └── PostCommitNonRevertiblePhase
- ├── Stage 1 of 3 in PostCommitNonRevertiblePhase
- │ ├── 2 elements transitioning toward PUBLIC
- │ │ ├── VALIDATED → PUBLIC PrimaryIndex:{DescID: 104 (t), IndexID: 1 (t_pkey+), ConstraintID: 1}
- │ │ └── ABSENT → PUBLIC IndexName:{DescID: 104 (t), Name: "t_pkey", IndexID: 1 (t_pkey+)}
- │ ├── 15 elements transitioning toward ABSENT
- │ │ ├── PUBLIC → VALIDATED PrimaryIndex:{DescID: 104 (t), IndexID: 2 (t_pkey-), ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1 (t_pkey+)}
- │ │ ├── PUBLIC → ABSENT IndexName:{DescID: 104 (t), Name: "t_pkey", IndexID: 2 (t_pkey-)}
- │ │ ├── TRANSIENT_DELETE_ONLY → ABSENT TemporaryIndex:{DescID: 104 (t), IndexID: 3, ConstraintID: 3, SourceIndexID: 1 (t_pkey+)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 1 (i), IndexID: 3}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 2 (j_shadow-j+), IndexID: 3}
- │ │ ├── MERGE_ONLY → DELETE_ONLY PrimaryIndex:{DescID: 104 (t), IndexID: 4 (t_pkey-), ConstraintID: 4, TemporaryIndexID: 5, SourceIndexID: 2 (t_pkey-)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 1 (i), IndexID: 4 (t_pkey-)}
- │ │ ├── WRITE_ONLY → DELETE_ONLY TemporaryIndex:{DescID: 104 (t), IndexID: 5, ConstraintID: 5, SourceIndexID: 2 (t_pkey-)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 1 (i), IndexID: 5}
- │ │ ├── WRITE_ONLY → DELETE_ONLY Column:{DescID: 104 (t), ColumnID: 3 (j-)}
- │ │ ├── PUBLIC → ABSENT ColumnName:{DescID: 104 (t), Name: "j", ColumnID: 3 (j-)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 3 (j-), IndexID: 3}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 3 (j-), IndexID: 4 (t_pkey-)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 3 (j-), IndexID: 5}
- │ │ └── PUBLIC → ABSENT ColumnName:{DescID: 104 (t), Name: "j_shadow", ColumnID: 2 (j_shadow-j+)}
- │ └── 19 Mutation operations
- │ ├── SetIndexName {"IndexID":1,"Name":"t_pkey","TableID":104}
- │ ├── MakePublicPrimaryIndexWriteOnly {"IndexID":2,"TableID":104}
- │ ├── SetIndexName {"IndexID":2,"Name":"crdb_internal_in...","TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":1,"IndexID":3,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":2,"IndexID":3,"Kind":2,"TableID":104}
- │ ├── MakeWriteOnlyIndexDeleteOnly {"IndexID":5,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":1,"IndexID":5,"TableID":104}
- │ ├── MakeWriteOnlyColumnDeleteOnly {"ColumnID":3,"TableID":104}
- │ ├── SetColumnName {"ColumnID":3,"Name":"crdb_internal_co...","TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":3,"IndexID":3,"Kind":2,"Ordinal":1,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":3,"IndexID":5,"Kind":2,"TableID":104}
- │ ├── SetColumnName {"ColumnID":2,"Name":"j","TableID":104}
- │ ├── MakeValidatedPrimaryIndexPublic {"IndexID":1,"TableID":104}
- │ ├── MakeIndexAbsent {"IndexID":3,"TableID":104}
- │ ├── MakeWriteOnlyIndexDeleteOnly {"IndexID":4,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":1,"IndexID":4,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":3,"IndexID":4,"Kind":2,"TableID":104}
- │ ├── SetJobStateOnDescriptor {"DescriptorID":104}
- │ └── UpdateSchemaChangerJob {"IsNonCancelable":true,"RunningStatus":"PostCommitNonRev..."}
- ├── Stage 2 of 3 in PostCommitNonRevertiblePhase
- │ ├── 6 elements transitioning toward ABSENT
- │ │ ├── VALIDATED → DELETE_ONLY PrimaryIndex:{DescID: 104 (t), IndexID: 2 (t_pkey-), ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1 (t_pkey+)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 1 (i), IndexID: 2 (t_pkey-)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 2 (j_shadow-j+), IndexID: 2 (t_pkey-)}
- │ │ ├── DELETE_ONLY → ABSENT PrimaryIndex:{DescID: 104 (t), IndexID: 4 (t_pkey-), ConstraintID: 4, TemporaryIndexID: 5, SourceIndexID: 2 (t_pkey-)}
- │ │ ├── DELETE_ONLY → ABSENT TemporaryIndex:{DescID: 104 (t), IndexID: 5, ConstraintID: 5, SourceIndexID: 2 (t_pkey-)}
- │ │ └── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 3 (j-), IndexID: 2 (t_pkey-)}
- │ └── 8 Mutation operations
- │ ├── MakeIndexAbsent {"IndexID":4,"TableID":104}
- │ ├── MakeIndexAbsent {"IndexID":5,"TableID":104}
- │ ├── MakeWriteOnlyIndexDeleteOnly {"IndexID":2,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":1,"IndexID":2,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":2,"IndexID":2,"Kind":2,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":3,"IndexID":2,"Kind":2,"Ordinal":1,"TableID":104}
- │ ├── SetJobStateOnDescriptor {"DescriptorID":104}
- │ └── UpdateSchemaChangerJob {"IsNonCancelable":true,"RunningStatus":"PostCommitNonRev..."}
- └── Stage 3 of 3 in PostCommitNonRevertiblePhase
- ├── 8 elements transitioning toward ABSENT
- │ ├── DELETE_ONLY → ABSENT PrimaryIndex:{DescID: 104 (t), IndexID: 2 (t_pkey-), ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1 (t_pkey+)}
- │ ├── PUBLIC → ABSENT IndexData:{DescID: 104 (t), IndexID: 2 (t_pkey-)}
- │ ├── PUBLIC → ABSENT IndexData:{DescID: 104 (t), IndexID: 3}
- │ ├── PUBLIC → ABSENT IndexData:{DescID: 104 (t), IndexID: 4 (t_pkey-)}
- │ ├── PUBLIC → ABSENT IndexData:{DescID: 104 (t), IndexID: 5}
- │ ├── DELETE_ONLY → ABSENT Column:{DescID: 104 (t), ColumnID: 3 (j-)}
- │ ├── PUBLIC → ABSENT ColumnType:{DescID: 104 (t), ColumnFamilyID: 0 (primary), ColumnID: 3 (j-), TypeName: "INT8"}
- │ └── PUBLIC → ABSENT ColumnComputeExpression:{DescID: 104 (t), ColumnID: 3 (j-)}
- └── 9 Mutation operations
- ├── MakeIndexAbsent {"IndexID":2,"TableID":104}
- ├── CreateGCJobForIndex {"IndexID":2,"TableID":104}
- ├── CreateGCJobForIndex {"IndexID":3,"TableID":104}
- ├── CreateGCJobForIndex {"IndexID":4,"TableID":104}
- ├── CreateGCJobForIndex {"IndexID":5,"TableID":104}
- ├── RemoveColumnComputeExpression {"ColumnID":3,"TableID":104}
- ├── MakeDeleteOnlyColumnAbsent {"ColumnID":3,"TableID":104}
- ├── RemoveJobStateFromDescriptor {"DescriptorID":104}
- └── UpdateSchemaChangerJob {"IsNonCancelable":true,"RunningStatus":"all stages compl..."}
diff --git a/pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_column_type_general/alter_table_alter_column_type_general__rollback_14_of_15.explain b/pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_column_type_general/alter_table_alter_column_type_general__rollback_14_of_15.explain
deleted file mode 100644
index dc966e4804a4..000000000000
--- a/pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_column_type_general/alter_table_alter_column_type_general__rollback_14_of_15.explain
+++ /dev/null
@@ -1,88 +0,0 @@
-/* setup */
-CREATE TABLE t (i INT PRIMARY KEY, j TEXT);
-SET enable_experimental_alter_column_type_general=TRUE;
-INSERT INTO t VALUES (1,NULL),(2,'1'),(3,'2');
-
-/* test */
-ALTER TABLE t ALTER COLUMN j SET DATA TYPE BIGINT USING j::BIGINT;
-EXPLAIN (DDL) rollback at post-commit stage 14 of 15;
-----
-Schema change plan for rolling back ALTER TABLE ‹defaultdb›.public.‹t› ALTER COLUMN ‹j› SET DATA TYPE INT8 USING ‹j›::INT8;
- └── PostCommitNonRevertiblePhase
- ├── Stage 1 of 3 in PostCommitNonRevertiblePhase
- │ ├── 2 elements transitioning toward PUBLIC
- │ │ ├── VALIDATED → PUBLIC PrimaryIndex:{DescID: 104 (t), IndexID: 1 (t_pkey+), ConstraintID: 1}
- │ │ └── ABSENT → PUBLIC IndexName:{DescID: 104 (t), Name: "t_pkey", IndexID: 1 (t_pkey+)}
- │ ├── 15 elements transitioning toward ABSENT
- │ │ ├── PUBLIC → VALIDATED PrimaryIndex:{DescID: 104 (t), IndexID: 2 (t_pkey-), ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1 (t_pkey+)}
- │ │ ├── PUBLIC → ABSENT IndexName:{DescID: 104 (t), Name: "t_pkey", IndexID: 2 (t_pkey-)}
- │ │ ├── TRANSIENT_DELETE_ONLY → ABSENT TemporaryIndex:{DescID: 104 (t), IndexID: 3, ConstraintID: 3, SourceIndexID: 1 (t_pkey+)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 1 (i), IndexID: 3}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 2 (j_shadow-j+), IndexID: 3}
- │ │ ├── MERGE_ONLY → DELETE_ONLY PrimaryIndex:{DescID: 104 (t), IndexID: 4 (t_pkey-), ConstraintID: 4, TemporaryIndexID: 5, SourceIndexID: 2 (t_pkey-)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 1 (i), IndexID: 4 (t_pkey-)}
- │ │ ├── WRITE_ONLY → DELETE_ONLY TemporaryIndex:{DescID: 104 (t), IndexID: 5, ConstraintID: 5, SourceIndexID: 2 (t_pkey-)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 1 (i), IndexID: 5}
- │ │ ├── WRITE_ONLY → DELETE_ONLY Column:{DescID: 104 (t), ColumnID: 3 (j-)}
- │ │ ├── PUBLIC → ABSENT ColumnName:{DescID: 104 (t), Name: "j", ColumnID: 3 (j-)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 3 (j-), IndexID: 3}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 3 (j-), IndexID: 4 (t_pkey-)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 3 (j-), IndexID: 5}
- │ │ └── PUBLIC → ABSENT ColumnName:{DescID: 104 (t), Name: "j_shadow", ColumnID: 2 (j_shadow-j+)}
- │ └── 19 Mutation operations
- │ ├── SetIndexName {"IndexID":1,"Name":"t_pkey","TableID":104}
- │ ├── MakePublicPrimaryIndexWriteOnly {"IndexID":2,"TableID":104}
- │ ├── SetIndexName {"IndexID":2,"Name":"crdb_internal_in...","TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":1,"IndexID":3,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":2,"IndexID":3,"Kind":2,"TableID":104}
- │ ├── MakeWriteOnlyIndexDeleteOnly {"IndexID":5,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":1,"IndexID":5,"TableID":104}
- │ ├── MakeWriteOnlyColumnDeleteOnly {"ColumnID":3,"TableID":104}
- │ ├── SetColumnName {"ColumnID":3,"Name":"crdb_internal_co...","TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":3,"IndexID":3,"Kind":2,"Ordinal":1,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":3,"IndexID":5,"Kind":2,"TableID":104}
- │ ├── SetColumnName {"ColumnID":2,"Name":"j","TableID":104}
- │ ├── MakeValidatedPrimaryIndexPublic {"IndexID":1,"TableID":104}
- │ ├── MakeIndexAbsent {"IndexID":3,"TableID":104}
- │ ├── MakeWriteOnlyIndexDeleteOnly {"IndexID":4,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":1,"IndexID":4,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":3,"IndexID":4,"Kind":2,"TableID":104}
- │ ├── SetJobStateOnDescriptor {"DescriptorID":104}
- │ └── UpdateSchemaChangerJob {"IsNonCancelable":true,"RunningStatus":"PostCommitNonRev..."}
- ├── Stage 2 of 3 in PostCommitNonRevertiblePhase
- │ ├── 6 elements transitioning toward ABSENT
- │ │ ├── VALIDATED → DELETE_ONLY PrimaryIndex:{DescID: 104 (t), IndexID: 2 (t_pkey-), ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1 (t_pkey+)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 1 (i), IndexID: 2 (t_pkey-)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 2 (j_shadow-j+), IndexID: 2 (t_pkey-)}
- │ │ ├── DELETE_ONLY → ABSENT PrimaryIndex:{DescID: 104 (t), IndexID: 4 (t_pkey-), ConstraintID: 4, TemporaryIndexID: 5, SourceIndexID: 2 (t_pkey-)}
- │ │ ├── DELETE_ONLY → ABSENT TemporaryIndex:{DescID: 104 (t), IndexID: 5, ConstraintID: 5, SourceIndexID: 2 (t_pkey-)}
- │ │ └── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 3 (j-), IndexID: 2 (t_pkey-)}
- │ └── 8 Mutation operations
- │ ├── MakeIndexAbsent {"IndexID":4,"TableID":104}
- │ ├── MakeIndexAbsent {"IndexID":5,"TableID":104}
- │ ├── MakeWriteOnlyIndexDeleteOnly {"IndexID":2,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":1,"IndexID":2,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":2,"IndexID":2,"Kind":2,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":3,"IndexID":2,"Kind":2,"Ordinal":1,"TableID":104}
- │ ├── SetJobStateOnDescriptor {"DescriptorID":104}
- │ └── UpdateSchemaChangerJob {"IsNonCancelable":true,"RunningStatus":"PostCommitNonRev..."}
- └── Stage 3 of 3 in PostCommitNonRevertiblePhase
- ├── 8 elements transitioning toward ABSENT
- │ ├── DELETE_ONLY → ABSENT PrimaryIndex:{DescID: 104 (t), IndexID: 2 (t_pkey-), ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1 (t_pkey+)}
- │ ├── PUBLIC → ABSENT IndexData:{DescID: 104 (t), IndexID: 2 (t_pkey-)}
- │ ├── PUBLIC → ABSENT IndexData:{DescID: 104 (t), IndexID: 3}
- │ ├── PUBLIC → ABSENT IndexData:{DescID: 104 (t), IndexID: 4 (t_pkey-)}
- │ ├── PUBLIC → ABSENT IndexData:{DescID: 104 (t), IndexID: 5}
- │ ├── DELETE_ONLY → ABSENT Column:{DescID: 104 (t), ColumnID: 3 (j-)}
- │ ├── PUBLIC → ABSENT ColumnType:{DescID: 104 (t), ColumnFamilyID: 0 (primary), ColumnID: 3 (j-), TypeName: "INT8"}
- │ └── PUBLIC → ABSENT ColumnComputeExpression:{DescID: 104 (t), ColumnID: 3 (j-)}
- └── 9 Mutation operations
- ├── MakeIndexAbsent {"IndexID":2,"TableID":104}
- ├── CreateGCJobForIndex {"IndexID":2,"TableID":104}
- ├── CreateGCJobForIndex {"IndexID":3,"TableID":104}
- ├── CreateGCJobForIndex {"IndexID":4,"TableID":104}
- ├── CreateGCJobForIndex {"IndexID":5,"TableID":104}
- ├── RemoveColumnComputeExpression {"ColumnID":3,"TableID":104}
- ├── MakeDeleteOnlyColumnAbsent {"ColumnID":3,"TableID":104}
- ├── RemoveJobStateFromDescriptor {"DescriptorID":104}
- └── UpdateSchemaChangerJob {"IsNonCancelable":true,"RunningStatus":"all stages compl..."}
diff --git a/pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_column_type_general/alter_table_alter_column_type_general__rollback_15_of_15.explain b/pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_column_type_general/alter_table_alter_column_type_general__rollback_15_of_15.explain
deleted file mode 100644
index cde59f8f2e2d..000000000000
--- a/pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_column_type_general/alter_table_alter_column_type_general__rollback_15_of_15.explain
+++ /dev/null
@@ -1,86 +0,0 @@
-/* setup */
-CREATE TABLE t (i INT PRIMARY KEY, j TEXT);
-SET enable_experimental_alter_column_type_general=TRUE;
-INSERT INTO t VALUES (1,NULL),(2,'1'),(3,'2');
-
-/* test */
-ALTER TABLE t ALTER COLUMN j SET DATA TYPE BIGINT USING j::BIGINT;
-EXPLAIN (DDL) rollback at post-commit stage 15 of 15;
-----
-Schema change plan for rolling back ALTER TABLE ‹defaultdb›.public.‹t› ALTER COLUMN ‹j› SET DATA TYPE INT8 USING ‹j›::INT8;
- └── PostCommitNonRevertiblePhase
- ├── Stage 1 of 3 in PostCommitNonRevertiblePhase
- │ ├── 2 elements transitioning toward PUBLIC
- │ │ ├── VALIDATED → PUBLIC PrimaryIndex:{DescID: 104 (t), IndexID: 1 (t_pkey+), ConstraintID: 1}
- │ │ └── ABSENT → PUBLIC IndexName:{DescID: 104 (t), Name: "t_pkey", IndexID: 1 (t_pkey+)}
- │ ├── 15 elements transitioning toward ABSENT
- │ │ ├── PUBLIC → VALIDATED PrimaryIndex:{DescID: 104 (t), IndexID: 2 (t_pkey-), ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1 (t_pkey+)}
- │ │ ├── PUBLIC → ABSENT IndexName:{DescID: 104 (t), Name: "t_pkey", IndexID: 2 (t_pkey-)}
- │ │ ├── TRANSIENT_DELETE_ONLY → ABSENT TemporaryIndex:{DescID: 104 (t), IndexID: 3, ConstraintID: 3, SourceIndexID: 1 (t_pkey+)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 1 (i), IndexID: 3}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 2 (j_shadow-j+), IndexID: 3}
- │ │ ├── WRITE_ONLY → DELETE_ONLY PrimaryIndex:{DescID: 104 (t), IndexID: 4 (t_pkey-), ConstraintID: 4, TemporaryIndexID: 5, SourceIndexID: 2 (t_pkey-)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 1 (i), IndexID: 4 (t_pkey-)}
- │ │ ├── TRANSIENT_DELETE_ONLY → ABSENT TemporaryIndex:{DescID: 104 (t), IndexID: 5, ConstraintID: 5, SourceIndexID: 2 (t_pkey-)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 1 (i), IndexID: 5}
- │ │ ├── WRITE_ONLY → DELETE_ONLY Column:{DescID: 104 (t), ColumnID: 3 (j-)}
- │ │ ├── PUBLIC → ABSENT ColumnName:{DescID: 104 (t), Name: "j", ColumnID: 3 (j-)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 3 (j-), IndexID: 3}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 3 (j-), IndexID: 4 (t_pkey-)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 3 (j-), IndexID: 5}
- │ │ └── PUBLIC → ABSENT ColumnName:{DescID: 104 (t), Name: "j_shadow", ColumnID: 2 (j_shadow-j+)}
- │ └── 19 Mutation operations
- │ ├── SetIndexName {"IndexID":1,"Name":"t_pkey","TableID":104}
- │ ├── MakePublicPrimaryIndexWriteOnly {"IndexID":2,"TableID":104}
- │ ├── SetIndexName {"IndexID":2,"Name":"crdb_internal_in...","TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":1,"IndexID":3,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":2,"IndexID":3,"Kind":2,"TableID":104}
- │ ├── MakeWriteOnlyIndexDeleteOnly {"IndexID":4,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":1,"IndexID":4,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":1,"IndexID":5,"TableID":104}
- │ ├── MakeWriteOnlyColumnDeleteOnly {"ColumnID":3,"TableID":104}
- │ ├── SetColumnName {"ColumnID":3,"Name":"crdb_internal_co...","TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":3,"IndexID":3,"Kind":2,"Ordinal":1,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":3,"IndexID":4,"Kind":2,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":3,"IndexID":5,"Kind":2,"TableID":104}
- │ ├── SetColumnName {"ColumnID":2,"Name":"j","TableID":104}
- │ ├── MakeValidatedPrimaryIndexPublic {"IndexID":1,"TableID":104}
- │ ├── MakeIndexAbsent {"IndexID":3,"TableID":104}
- │ ├── MakeIndexAbsent {"IndexID":5,"TableID":104}
- │ ├── SetJobStateOnDescriptor {"DescriptorID":104}
- │ └── UpdateSchemaChangerJob {"IsNonCancelable":true,"RunningStatus":"PostCommitNonRev..."}
- ├── Stage 2 of 3 in PostCommitNonRevertiblePhase
- │ ├── 5 elements transitioning toward ABSENT
- │ │ ├── VALIDATED → DELETE_ONLY PrimaryIndex:{DescID: 104 (t), IndexID: 2 (t_pkey-), ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1 (t_pkey+)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 1 (i), IndexID: 2 (t_pkey-)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 2 (j_shadow-j+), IndexID: 2 (t_pkey-)}
- │ │ ├── DELETE_ONLY → ABSENT PrimaryIndex:{DescID: 104 (t), IndexID: 4 (t_pkey-), ConstraintID: 4, TemporaryIndexID: 5, SourceIndexID: 2 (t_pkey-)}
- │ │ └── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 3 (j-), IndexID: 2 (t_pkey-)}
- │ └── 7 Mutation operations
- │ ├── MakeIndexAbsent {"IndexID":4,"TableID":104}
- │ ├── MakeWriteOnlyIndexDeleteOnly {"IndexID":2,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":1,"IndexID":2,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":2,"IndexID":2,"Kind":2,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":3,"IndexID":2,"Kind":2,"Ordinal":1,"TableID":104}
- │ ├── SetJobStateOnDescriptor {"DescriptorID":104}
- │ └── UpdateSchemaChangerJob {"IsNonCancelable":true,"RunningStatus":"PostCommitNonRev..."}
- └── Stage 3 of 3 in PostCommitNonRevertiblePhase
- ├── 8 elements transitioning toward ABSENT
- │ ├── DELETE_ONLY → ABSENT PrimaryIndex:{DescID: 104 (t), IndexID: 2 (t_pkey-), ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1 (t_pkey+)}
- │ ├── PUBLIC → ABSENT IndexData:{DescID: 104 (t), IndexID: 2 (t_pkey-)}
- │ ├── PUBLIC → ABSENT IndexData:{DescID: 104 (t), IndexID: 3}
- │ ├── PUBLIC → ABSENT IndexData:{DescID: 104 (t), IndexID: 4 (t_pkey-)}
- │ ├── PUBLIC → ABSENT IndexData:{DescID: 104 (t), IndexID: 5}
- │ ├── DELETE_ONLY → ABSENT Column:{DescID: 104 (t), ColumnID: 3 (j-)}
- │ ├── PUBLIC → ABSENT ColumnType:{DescID: 104 (t), ColumnFamilyID: 0 (primary), ColumnID: 3 (j-), TypeName: "INT8"}
- │ └── PUBLIC → ABSENT ColumnComputeExpression:{DescID: 104 (t), ColumnID: 3 (j-)}
- └── 9 Mutation operations
- ├── MakeIndexAbsent {"IndexID":2,"TableID":104}
- ├── CreateGCJobForIndex {"IndexID":2,"TableID":104}
- ├── CreateGCJobForIndex {"IndexID":3,"TableID":104}
- ├── CreateGCJobForIndex {"IndexID":4,"TableID":104}
- ├── CreateGCJobForIndex {"IndexID":5,"TableID":104}
- ├── RemoveColumnComputeExpression {"ColumnID":3,"TableID":104}
- ├── MakeDeleteOnlyColumnAbsent {"ColumnID":3,"TableID":104}
- ├── RemoveJobStateFromDescriptor {"DescriptorID":104}
- └── UpdateSchemaChangerJob {"IsNonCancelable":true,"RunningStatus":"all stages compl..."}
diff --git a/pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_column_type_general/alter_table_alter_column_type_general__rollback_1_of_15.explain b/pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_column_type_general/alter_table_alter_column_type_general__rollback_1_of_15.explain
deleted file mode 100644
index ea31b12ba887..000000000000
--- a/pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_column_type_general/alter_table_alter_column_type_general__rollback_1_of_15.explain
+++ /dev/null
@@ -1,51 +0,0 @@
-/* setup */
-CREATE TABLE t (i INT PRIMARY KEY, j TEXT);
-SET enable_experimental_alter_column_type_general=TRUE;
-INSERT INTO t VALUES (1,NULL),(2,'1'),(3,'2');
-
-/* test */
-ALTER TABLE t ALTER COLUMN j SET DATA TYPE BIGINT USING j::BIGINT;
-EXPLAIN (DDL) rollback at post-commit stage 1 of 15;
-----
-Schema change plan for rolling back ALTER TABLE ‹defaultdb›.public.‹t› ALTER COLUMN ‹j› SET DATA TYPE INT8 USING ‹j›::INT8;
- └── PostCommitNonRevertiblePhase
- └── Stage 1 of 1 in PostCommitNonRevertiblePhase
- ├── 18 elements transitioning toward ABSENT
- │ ├── BACKFILL_ONLY → ABSENT PrimaryIndex:{DescID: 104 (t), IndexID: 2 (t_pkey-), ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1 (t_pkey+)}
- │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 1 (i), IndexID: 2 (t_pkey-)}
- │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 2 (j_shadow-j+), IndexID: 2 (t_pkey-)}
- │ ├── PUBLIC → ABSENT IndexData:{DescID: 104 (t), IndexID: 2 (t_pkey-)}
- │ ├── DELETE_ONLY → ABSENT TemporaryIndex:{DescID: 104 (t), IndexID: 3, ConstraintID: 3, SourceIndexID: 1 (t_pkey+)}
- │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 1 (i), IndexID: 3}
- │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 2 (j_shadow-j+), IndexID: 3}
- │ ├── BACKFILL_ONLY → ABSENT PrimaryIndex:{DescID: 104 (t), IndexID: 4 (t_pkey-), ConstraintID: 4, TemporaryIndexID: 5, SourceIndexID: 2 (t_pkey-)}
- │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 1 (i), IndexID: 4 (t_pkey-)}
- │ ├── PUBLIC → ABSENT IndexData:{DescID: 104 (t), IndexID: 4 (t_pkey-)}
- │ ├── DELETE_ONLY → ABSENT Column:{DescID: 104 (t), ColumnID: 3 (j-)}
- │ ├── PUBLIC → ABSENT ColumnName:{DescID: 104 (t), Name: "j", ColumnID: 3 (j-)}
- │ ├── PUBLIC → ABSENT ColumnType:{DescID: 104 (t), ColumnFamilyID: 0 (primary), ColumnID: 3 (j-), TypeName: "INT8"}
- │ ├── PUBLIC → ABSENT ColumnComputeExpression:{DescID: 104 (t), ColumnID: 3 (j-)}
- │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 3 (j-), IndexID: 2 (t_pkey-)}
- │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 3 (j-), IndexID: 3}
- │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 3 (j-), IndexID: 4 (t_pkey-)}
- │ └── PUBLIC → ABSENT ColumnName:{DescID: 104 (t), Name: "j_shadow", ColumnID: 2 (j_shadow-j+)}
- └── 19 Mutation operations
- ├── RemoveColumnFromIndex {"ColumnID":1,"IndexID":2,"TableID":104}
- ├── RemoveColumnFromIndex {"ColumnID":2,"IndexID":2,"Kind":2,"TableID":104}
- ├── RemoveColumnFromIndex {"ColumnID":1,"IndexID":3,"TableID":104}
- ├── RemoveColumnFromIndex {"ColumnID":2,"IndexID":3,"Kind":2,"TableID":104}
- ├── RemoveColumnFromIndex {"ColumnID":1,"IndexID":4,"TableID":104}
- ├── SetColumnName {"ColumnID":3,"Name":"crdb_internal_co...","TableID":104}
- ├── RemoveColumnComputeExpression {"ColumnID":3,"TableID":104}
- ├── RemoveColumnFromIndex {"ColumnID":3,"IndexID":2,"Kind":2,"Ordinal":1,"TableID":104}
- ├── RemoveColumnFromIndex {"ColumnID":3,"IndexID":3,"Kind":2,"Ordinal":1,"TableID":104}
- ├── RemoveColumnFromIndex {"ColumnID":3,"IndexID":4,"Kind":2,"TableID":104}
- ├── SetColumnName {"ColumnID":2,"Name":"j","TableID":104}
- ├── MakeIndexAbsent {"IndexID":2,"TableID":104}
- ├── CreateGCJobForIndex {"IndexID":2,"TableID":104}
- ├── MakeIndexAbsent {"IndexID":3,"TableID":104}
- ├── MakeIndexAbsent {"IndexID":4,"TableID":104}
- ├── CreateGCJobForIndex {"IndexID":4,"TableID":104}
- ├── MakeDeleteOnlyColumnAbsent {"ColumnID":3,"TableID":104}
- ├── RemoveJobStateFromDescriptor {"DescriptorID":104}
- └── UpdateSchemaChangerJob {"IsNonCancelable":true,"RunningStatus":"all stages compl..."}
diff --git a/pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_column_type_general/alter_table_alter_column_type_general__rollback_2_of_15.explain b/pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_column_type_general/alter_table_alter_column_type_general__rollback_2_of_15.explain
deleted file mode 100644
index 12c698859eda..000000000000
--- a/pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_column_type_general/alter_table_alter_column_type_general__rollback_2_of_15.explain
+++ /dev/null
@@ -1,62 +0,0 @@
-/* setup */
-CREATE TABLE t (i INT PRIMARY KEY, j TEXT);
-SET enable_experimental_alter_column_type_general=TRUE;
-INSERT INTO t VALUES (1,NULL),(2,'1'),(3,'2');
-
-/* test */
-ALTER TABLE t ALTER COLUMN j SET DATA TYPE BIGINT USING j::BIGINT;
-EXPLAIN (DDL) rollback at post-commit stage 2 of 15;
-----
-Schema change plan for rolling back ALTER TABLE ‹defaultdb›.public.‹t› ALTER COLUMN ‹j› SET DATA TYPE INT8 USING ‹j›::INT8;
- └── PostCommitNonRevertiblePhase
- ├── Stage 1 of 2 in PostCommitNonRevertiblePhase
- │ ├── 14 elements transitioning toward ABSENT
- │ │ ├── BACKFILL_ONLY → ABSENT PrimaryIndex:{DescID: 104 (t), IndexID: 2 (t_pkey-), ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1 (t_pkey+)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 1 (i), IndexID: 2 (t_pkey-)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 2 (j_shadow-j+), IndexID: 2 (t_pkey-)}
- │ │ ├── WRITE_ONLY → DELETE_ONLY TemporaryIndex:{DescID: 104 (t), IndexID: 3, ConstraintID: 3, SourceIndexID: 1 (t_pkey+)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 1 (i), IndexID: 3}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 2 (j_shadow-j+), IndexID: 3}
- │ │ ├── BACKFILL_ONLY → ABSENT PrimaryIndex:{DescID: 104 (t), IndexID: 4 (t_pkey-), ConstraintID: 4, TemporaryIndexID: 5, SourceIndexID: 2 (t_pkey-)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 1 (i), IndexID: 4 (t_pkey-)}
- │ │ ├── WRITE_ONLY → DELETE_ONLY Column:{DescID: 104 (t), ColumnID: 3 (j-)}
- │ │ ├── PUBLIC → ABSENT ColumnName:{DescID: 104 (t), Name: "j", ColumnID: 3 (j-)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 3 (j-), IndexID: 2 (t_pkey-)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 3 (j-), IndexID: 3}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 3 (j-), IndexID: 4 (t_pkey-)}
- │ │ └── PUBLIC → ABSENT ColumnName:{DescID: 104 (t), Name: "j_shadow", ColumnID: 2 (j_shadow-j+)}
- │ └── 16 Mutation operations
- │ ├── RemoveColumnFromIndex {"ColumnID":1,"IndexID":2,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":2,"IndexID":2,"Kind":2,"TableID":104}
- │ ├── MakeWriteOnlyIndexDeleteOnly {"IndexID":3,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":1,"IndexID":3,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":2,"IndexID":3,"Kind":2,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":1,"IndexID":4,"TableID":104}
- │ ├── MakeWriteOnlyColumnDeleteOnly {"ColumnID":3,"TableID":104}
- │ ├── SetColumnName {"ColumnID":3,"Name":"crdb_internal_co...","TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":3,"IndexID":2,"Kind":2,"Ordinal":1,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":3,"IndexID":3,"Kind":2,"Ordinal":1,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":3,"IndexID":4,"Kind":2,"TableID":104}
- │ ├── SetColumnName {"ColumnID":2,"Name":"j","TableID":104}
- │ ├── MakeIndexAbsent {"IndexID":2,"TableID":104}
- │ ├── MakeIndexAbsent {"IndexID":4,"TableID":104}
- │ ├── SetJobStateOnDescriptor {"DescriptorID":104}
- │ └── UpdateSchemaChangerJob {"IsNonCancelable":true,"RunningStatus":"PostCommitNonRev..."}
- └── Stage 2 of 2 in PostCommitNonRevertiblePhase
- ├── 7 elements transitioning toward ABSENT
- │ ├── PUBLIC → ABSENT IndexData:{DescID: 104 (t), IndexID: 2 (t_pkey-)}
- │ ├── DELETE_ONLY → ABSENT TemporaryIndex:{DescID: 104 (t), IndexID: 3, ConstraintID: 3, SourceIndexID: 1 (t_pkey+)}
- │ ├── PUBLIC → ABSENT IndexData:{DescID: 104 (t), IndexID: 3}
- │ ├── PUBLIC → ABSENT IndexData:{DescID: 104 (t), IndexID: 4 (t_pkey-)}
- │ ├── DELETE_ONLY → ABSENT Column:{DescID: 104 (t), ColumnID: 3 (j-)}
- │ ├── PUBLIC → ABSENT ColumnType:{DescID: 104 (t), ColumnFamilyID: 0 (primary), ColumnID: 3 (j-), TypeName: "INT8"}
- │ └── PUBLIC → ABSENT ColumnComputeExpression:{DescID: 104 (t), ColumnID: 3 (j-)}
- └── 8 Mutation operations
- ├── CreateGCJobForIndex {"IndexID":2,"TableID":104}
- ├── MakeIndexAbsent {"IndexID":3,"TableID":104}
- ├── CreateGCJobForIndex {"IndexID":3,"TableID":104}
- ├── CreateGCJobForIndex {"IndexID":4,"TableID":104}
- ├── RemoveColumnComputeExpression {"ColumnID":3,"TableID":104}
- ├── MakeDeleteOnlyColumnAbsent {"ColumnID":3,"TableID":104}
- ├── RemoveJobStateFromDescriptor {"DescriptorID":104}
- └── UpdateSchemaChangerJob {"IsNonCancelable":true,"RunningStatus":"all stages compl..."}
diff --git a/pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_column_type_general/alter_table_alter_column_type_general__rollback_3_of_15.explain b/pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_column_type_general/alter_table_alter_column_type_general__rollback_3_of_15.explain
deleted file mode 100644
index 9e6631f17057..000000000000
--- a/pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_column_type_general/alter_table_alter_column_type_general__rollback_3_of_15.explain
+++ /dev/null
@@ -1,62 +0,0 @@
-/* setup */
-CREATE TABLE t (i INT PRIMARY KEY, j TEXT);
-SET enable_experimental_alter_column_type_general=TRUE;
-INSERT INTO t VALUES (1,NULL),(2,'1'),(3,'2');
-
-/* test */
-ALTER TABLE t ALTER COLUMN j SET DATA TYPE BIGINT USING j::BIGINT;
-EXPLAIN (DDL) rollback at post-commit stage 3 of 15;
-----
-Schema change plan for rolling back ALTER TABLE ‹defaultdb›.public.‹t› ALTER COLUMN ‹j› SET DATA TYPE INT8 USING ‹j›::INT8;
- └── PostCommitNonRevertiblePhase
- ├── Stage 1 of 2 in PostCommitNonRevertiblePhase
- │ ├── 14 elements transitioning toward ABSENT
- │ │ ├── BACKFILL_ONLY → ABSENT PrimaryIndex:{DescID: 104 (t), IndexID: 2 (t_pkey-), ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1 (t_pkey+)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 1 (i), IndexID: 2 (t_pkey-)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 2 (j_shadow-j+), IndexID: 2 (t_pkey-)}
- │ │ ├── WRITE_ONLY → DELETE_ONLY TemporaryIndex:{DescID: 104 (t), IndexID: 3, ConstraintID: 3, SourceIndexID: 1 (t_pkey+)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 1 (i), IndexID: 3}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 2 (j_shadow-j+), IndexID: 3}
- │ │ ├── BACKFILL_ONLY → ABSENT PrimaryIndex:{DescID: 104 (t), IndexID: 4 (t_pkey-), ConstraintID: 4, TemporaryIndexID: 5, SourceIndexID: 2 (t_pkey-)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 1 (i), IndexID: 4 (t_pkey-)}
- │ │ ├── WRITE_ONLY → DELETE_ONLY Column:{DescID: 104 (t), ColumnID: 3 (j-)}
- │ │ ├── PUBLIC → ABSENT ColumnName:{DescID: 104 (t), Name: "j", ColumnID: 3 (j-)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 3 (j-), IndexID: 2 (t_pkey-)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 3 (j-), IndexID: 3}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 3 (j-), IndexID: 4 (t_pkey-)}
- │ │ └── PUBLIC → ABSENT ColumnName:{DescID: 104 (t), Name: "j_shadow", ColumnID: 2 (j_shadow-j+)}
- │ └── 16 Mutation operations
- │ ├── RemoveColumnFromIndex {"ColumnID":1,"IndexID":2,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":2,"IndexID":2,"Kind":2,"TableID":104}
- │ ├── MakeWriteOnlyIndexDeleteOnly {"IndexID":3,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":1,"IndexID":3,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":2,"IndexID":3,"Kind":2,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":1,"IndexID":4,"TableID":104}
- │ ├── MakeWriteOnlyColumnDeleteOnly {"ColumnID":3,"TableID":104}
- │ ├── SetColumnName {"ColumnID":3,"Name":"crdb_internal_co...","TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":3,"IndexID":2,"Kind":2,"Ordinal":1,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":3,"IndexID":3,"Kind":2,"Ordinal":1,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":3,"IndexID":4,"Kind":2,"TableID":104}
- │ ├── SetColumnName {"ColumnID":2,"Name":"j","TableID":104}
- │ ├── MakeIndexAbsent {"IndexID":2,"TableID":104}
- │ ├── MakeIndexAbsent {"IndexID":4,"TableID":104}
- │ ├── SetJobStateOnDescriptor {"DescriptorID":104}
- │ └── UpdateSchemaChangerJob {"IsNonCancelable":true,"RunningStatus":"PostCommitNonRev..."}
- └── Stage 2 of 2 in PostCommitNonRevertiblePhase
- ├── 7 elements transitioning toward ABSENT
- │ ├── PUBLIC → ABSENT IndexData:{DescID: 104 (t), IndexID: 2 (t_pkey-)}
- │ ├── DELETE_ONLY → ABSENT TemporaryIndex:{DescID: 104 (t), IndexID: 3, ConstraintID: 3, SourceIndexID: 1 (t_pkey+)}
- │ ├── PUBLIC → ABSENT IndexData:{DescID: 104 (t), IndexID: 3}
- │ ├── PUBLIC → ABSENT IndexData:{DescID: 104 (t), IndexID: 4 (t_pkey-)}
- │ ├── DELETE_ONLY → ABSENT Column:{DescID: 104 (t), ColumnID: 3 (j-)}
- │ ├── PUBLIC → ABSENT ColumnType:{DescID: 104 (t), ColumnFamilyID: 0 (primary), ColumnID: 3 (j-), TypeName: "INT8"}
- │ └── PUBLIC → ABSENT ColumnComputeExpression:{DescID: 104 (t), ColumnID: 3 (j-)}
- └── 8 Mutation operations
- ├── CreateGCJobForIndex {"IndexID":2,"TableID":104}
- ├── MakeIndexAbsent {"IndexID":3,"TableID":104}
- ├── CreateGCJobForIndex {"IndexID":3,"TableID":104}
- ├── CreateGCJobForIndex {"IndexID":4,"TableID":104}
- ├── RemoveColumnComputeExpression {"ColumnID":3,"TableID":104}
- ├── MakeDeleteOnlyColumnAbsent {"ColumnID":3,"TableID":104}
- ├── RemoveJobStateFromDescriptor {"DescriptorID":104}
- └── UpdateSchemaChangerJob {"IsNonCancelable":true,"RunningStatus":"all stages compl..."}
diff --git a/pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_column_type_general/alter_table_alter_column_type_general__rollback_4_of_15.explain b/pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_column_type_general/alter_table_alter_column_type_general__rollback_4_of_15.explain
deleted file mode 100644
index e900d1153604..000000000000
--- a/pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_column_type_general/alter_table_alter_column_type_general__rollback_4_of_15.explain
+++ /dev/null
@@ -1,62 +0,0 @@
-/* setup */
-CREATE TABLE t (i INT PRIMARY KEY, j TEXT);
-SET enable_experimental_alter_column_type_general=TRUE;
-INSERT INTO t VALUES (1,NULL),(2,'1'),(3,'2');
-
-/* test */
-ALTER TABLE t ALTER COLUMN j SET DATA TYPE BIGINT USING j::BIGINT;
-EXPLAIN (DDL) rollback at post-commit stage 4 of 15;
-----
-Schema change plan for rolling back ALTER TABLE ‹defaultdb›.public.‹t› ALTER COLUMN ‹j› SET DATA TYPE INT8 USING ‹j›::INT8;
- └── PostCommitNonRevertiblePhase
- ├── Stage 1 of 2 in PostCommitNonRevertiblePhase
- │ ├── 14 elements transitioning toward ABSENT
- │ │ ├── DELETE_ONLY → ABSENT PrimaryIndex:{DescID: 104 (t), IndexID: 2 (t_pkey-), ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1 (t_pkey+)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 1 (i), IndexID: 2 (t_pkey-)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 2 (j_shadow-j+), IndexID: 2 (t_pkey-)}
- │ │ ├── WRITE_ONLY → DELETE_ONLY TemporaryIndex:{DescID: 104 (t), IndexID: 3, ConstraintID: 3, SourceIndexID: 1 (t_pkey+)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 1 (i), IndexID: 3}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 2 (j_shadow-j+), IndexID: 3}
- │ │ ├── BACKFILL_ONLY → ABSENT PrimaryIndex:{DescID: 104 (t), IndexID: 4 (t_pkey-), ConstraintID: 4, TemporaryIndexID: 5, SourceIndexID: 2 (t_pkey-)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 1 (i), IndexID: 4 (t_pkey-)}
- │ │ ├── WRITE_ONLY → DELETE_ONLY Column:{DescID: 104 (t), ColumnID: 3 (j-)}
- │ │ ├── PUBLIC → ABSENT ColumnName:{DescID: 104 (t), Name: "j", ColumnID: 3 (j-)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 3 (j-), IndexID: 2 (t_pkey-)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 3 (j-), IndexID: 3}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 3 (j-), IndexID: 4 (t_pkey-)}
- │ │ └── PUBLIC → ABSENT ColumnName:{DescID: 104 (t), Name: "j_shadow", ColumnID: 2 (j_shadow-j+)}
- │ └── 16 Mutation operations
- │ ├── RemoveColumnFromIndex {"ColumnID":1,"IndexID":2,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":2,"IndexID":2,"Kind":2,"TableID":104}
- │ ├── MakeWriteOnlyIndexDeleteOnly {"IndexID":3,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":1,"IndexID":3,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":2,"IndexID":3,"Kind":2,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":1,"IndexID":4,"TableID":104}
- │ ├── MakeWriteOnlyColumnDeleteOnly {"ColumnID":3,"TableID":104}
- │ ├── SetColumnName {"ColumnID":3,"Name":"crdb_internal_co...","TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":3,"IndexID":2,"Kind":2,"Ordinal":1,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":3,"IndexID":3,"Kind":2,"Ordinal":1,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":3,"IndexID":4,"Kind":2,"TableID":104}
- │ ├── SetColumnName {"ColumnID":2,"Name":"j","TableID":104}
- │ ├── MakeIndexAbsent {"IndexID":2,"TableID":104}
- │ ├── MakeIndexAbsent {"IndexID":4,"TableID":104}
- │ ├── SetJobStateOnDescriptor {"DescriptorID":104}
- │ └── UpdateSchemaChangerJob {"IsNonCancelable":true,"RunningStatus":"PostCommitNonRev..."}
- └── Stage 2 of 2 in PostCommitNonRevertiblePhase
- ├── 7 elements transitioning toward ABSENT
- │ ├── PUBLIC → ABSENT IndexData:{DescID: 104 (t), IndexID: 2 (t_pkey-)}
- │ ├── DELETE_ONLY → ABSENT TemporaryIndex:{DescID: 104 (t), IndexID: 3, ConstraintID: 3, SourceIndexID: 1 (t_pkey+)}
- │ ├── PUBLIC → ABSENT IndexData:{DescID: 104 (t), IndexID: 3}
- │ ├── PUBLIC → ABSENT IndexData:{DescID: 104 (t), IndexID: 4 (t_pkey-)}
- │ ├── DELETE_ONLY → ABSENT Column:{DescID: 104 (t), ColumnID: 3 (j-)}
- │ ├── PUBLIC → ABSENT ColumnType:{DescID: 104 (t), ColumnFamilyID: 0 (primary), ColumnID: 3 (j-), TypeName: "INT8"}
- │ └── PUBLIC → ABSENT ColumnComputeExpression:{DescID: 104 (t), ColumnID: 3 (j-)}
- └── 8 Mutation operations
- ├── CreateGCJobForIndex {"IndexID":2,"TableID":104}
- ├── MakeIndexAbsent {"IndexID":3,"TableID":104}
- ├── CreateGCJobForIndex {"IndexID":3,"TableID":104}
- ├── CreateGCJobForIndex {"IndexID":4,"TableID":104}
- ├── RemoveColumnComputeExpression {"ColumnID":3,"TableID":104}
- ├── MakeDeleteOnlyColumnAbsent {"ColumnID":3,"TableID":104}
- ├── RemoveJobStateFromDescriptor {"DescriptorID":104}
- └── UpdateSchemaChangerJob {"IsNonCancelable":true,"RunningStatus":"all stages compl..."}
diff --git a/pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_column_type_general/alter_table_alter_column_type_general__rollback_5_of_15.explain b/pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_column_type_general/alter_table_alter_column_type_general__rollback_5_of_15.explain
deleted file mode 100644
index fbf5e8d5becb..000000000000
--- a/pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_column_type_general/alter_table_alter_column_type_general__rollback_5_of_15.explain
+++ /dev/null
@@ -1,64 +0,0 @@
-/* setup */
-CREATE TABLE t (i INT PRIMARY KEY, j TEXT);
-SET enable_experimental_alter_column_type_general=TRUE;
-INSERT INTO t VALUES (1,NULL),(2,'1'),(3,'2');
-
-/* test */
-ALTER TABLE t ALTER COLUMN j SET DATA TYPE BIGINT USING j::BIGINT;
-EXPLAIN (DDL) rollback at post-commit stage 5 of 15;
-----
-Schema change plan for rolling back ALTER TABLE ‹defaultdb›.public.‹t› ALTER COLUMN ‹j› SET DATA TYPE INT8 USING ‹j›::INT8;
- └── PostCommitNonRevertiblePhase
- ├── Stage 1 of 2 in PostCommitNonRevertiblePhase
- │ ├── 14 elements transitioning toward ABSENT
- │ │ ├── MERGE_ONLY → DELETE_ONLY PrimaryIndex:{DescID: 104 (t), IndexID: 2 (t_pkey-), ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1 (t_pkey+)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 1 (i), IndexID: 2 (t_pkey-)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 2 (j_shadow-j+), IndexID: 2 (t_pkey-)}
- │ │ ├── WRITE_ONLY → DELETE_ONLY TemporaryIndex:{DescID: 104 (t), IndexID: 3, ConstraintID: 3, SourceIndexID: 1 (t_pkey+)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 1 (i), IndexID: 3}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 2 (j_shadow-j+), IndexID: 3}
- │ │ ├── BACKFILL_ONLY → ABSENT PrimaryIndex:{DescID: 104 (t), IndexID: 4 (t_pkey-), ConstraintID: 4, TemporaryIndexID: 5, SourceIndexID: 2 (t_pkey-)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 1 (i), IndexID: 4 (t_pkey-)}
- │ │ ├── WRITE_ONLY → DELETE_ONLY Column:{DescID: 104 (t), ColumnID: 3 (j-)}
- │ │ ├── PUBLIC → ABSENT ColumnName:{DescID: 104 (t), Name: "j", ColumnID: 3 (j-)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 3 (j-), IndexID: 2 (t_pkey-)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 3 (j-), IndexID: 3}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 3 (j-), IndexID: 4 (t_pkey-)}
- │ │ └── PUBLIC → ABSENT ColumnName:{DescID: 104 (t), Name: "j_shadow", ColumnID: 2 (j_shadow-j+)}
- │ └── 16 Mutation operations
- │ ├── MakeWriteOnlyIndexDeleteOnly {"IndexID":3,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":1,"IndexID":3,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":2,"IndexID":3,"Kind":2,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":1,"IndexID":4,"TableID":104}
- │ ├── MakeWriteOnlyColumnDeleteOnly {"ColumnID":3,"TableID":104}
- │ ├── SetColumnName {"ColumnID":3,"Name":"crdb_internal_co...","TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":3,"IndexID":3,"Kind":2,"Ordinal":1,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":3,"IndexID":4,"Kind":2,"TableID":104}
- │ ├── SetColumnName {"ColumnID":2,"Name":"j","TableID":104}
- │ ├── MakeWriteOnlyIndexDeleteOnly {"IndexID":2,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":1,"IndexID":2,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":2,"IndexID":2,"Kind":2,"TableID":104}
- │ ├── MakeIndexAbsent {"IndexID":4,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":3,"IndexID":2,"Kind":2,"Ordinal":1,"TableID":104}
- │ ├── SetJobStateOnDescriptor {"DescriptorID":104}
- │ └── UpdateSchemaChangerJob {"IsNonCancelable":true,"RunningStatus":"PostCommitNonRev..."}
- └── Stage 2 of 2 in PostCommitNonRevertiblePhase
- ├── 8 elements transitioning toward ABSENT
- │ ├── DELETE_ONLY → ABSENT PrimaryIndex:{DescID: 104 (t), IndexID: 2 (t_pkey-), ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1 (t_pkey+)}
- │ ├── PUBLIC → ABSENT IndexData:{DescID: 104 (t), IndexID: 2 (t_pkey-)}
- │ ├── DELETE_ONLY → ABSENT TemporaryIndex:{DescID: 104 (t), IndexID: 3, ConstraintID: 3, SourceIndexID: 1 (t_pkey+)}
- │ ├── PUBLIC → ABSENT IndexData:{DescID: 104 (t), IndexID: 3}
- │ ├── PUBLIC → ABSENT IndexData:{DescID: 104 (t), IndexID: 4 (t_pkey-)}
- │ ├── DELETE_ONLY → ABSENT Column:{DescID: 104 (t), ColumnID: 3 (j-)}
- │ ├── PUBLIC → ABSENT ColumnType:{DescID: 104 (t), ColumnFamilyID: 0 (primary), ColumnID: 3 (j-), TypeName: "INT8"}
- │ └── PUBLIC → ABSENT ColumnComputeExpression:{DescID: 104 (t), ColumnID: 3 (j-)}
- └── 9 Mutation operations
- ├── MakeIndexAbsent {"IndexID":2,"TableID":104}
- ├── CreateGCJobForIndex {"IndexID":2,"TableID":104}
- ├── MakeIndexAbsent {"IndexID":3,"TableID":104}
- ├── CreateGCJobForIndex {"IndexID":3,"TableID":104}
- ├── CreateGCJobForIndex {"IndexID":4,"TableID":104}
- ├── RemoveColumnComputeExpression {"ColumnID":3,"TableID":104}
- ├── MakeDeleteOnlyColumnAbsent {"ColumnID":3,"TableID":104}
- ├── RemoveJobStateFromDescriptor {"DescriptorID":104}
- └── UpdateSchemaChangerJob {"IsNonCancelable":true,"RunningStatus":"all stages compl..."}
diff --git a/pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_column_type_general/alter_table_alter_column_type_general__rollback_6_of_15.explain b/pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_column_type_general/alter_table_alter_column_type_general__rollback_6_of_15.explain
deleted file mode 100644
index fd5391be1bce..000000000000
--- a/pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_column_type_general/alter_table_alter_column_type_general__rollback_6_of_15.explain
+++ /dev/null
@@ -1,64 +0,0 @@
-/* setup */
-CREATE TABLE t (i INT PRIMARY KEY, j TEXT);
-SET enable_experimental_alter_column_type_general=TRUE;
-INSERT INTO t VALUES (1,NULL),(2,'1'),(3,'2');
-
-/* test */
-ALTER TABLE t ALTER COLUMN j SET DATA TYPE BIGINT USING j::BIGINT;
-EXPLAIN (DDL) rollback at post-commit stage 6 of 15;
-----
-Schema change plan for rolling back ALTER TABLE ‹defaultdb›.public.‹t› ALTER COLUMN ‹j› SET DATA TYPE INT8 USING ‹j›::INT8;
- └── PostCommitNonRevertiblePhase
- ├── Stage 1 of 2 in PostCommitNonRevertiblePhase
- │ ├── 14 elements transitioning toward ABSENT
- │ │ ├── MERGE_ONLY → DELETE_ONLY PrimaryIndex:{DescID: 104 (t), IndexID: 2 (t_pkey-), ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1 (t_pkey+)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 1 (i), IndexID: 2 (t_pkey-)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 2 (j_shadow-j+), IndexID: 2 (t_pkey-)}
- │ │ ├── WRITE_ONLY → DELETE_ONLY TemporaryIndex:{DescID: 104 (t), IndexID: 3, ConstraintID: 3, SourceIndexID: 1 (t_pkey+)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 1 (i), IndexID: 3}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 2 (j_shadow-j+), IndexID: 3}
- │ │ ├── BACKFILL_ONLY → ABSENT PrimaryIndex:{DescID: 104 (t), IndexID: 4 (t_pkey-), ConstraintID: 4, TemporaryIndexID: 5, SourceIndexID: 2 (t_pkey-)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 1 (i), IndexID: 4 (t_pkey-)}
- │ │ ├── WRITE_ONLY → DELETE_ONLY Column:{DescID: 104 (t), ColumnID: 3 (j-)}
- │ │ ├── PUBLIC → ABSENT ColumnName:{DescID: 104 (t), Name: "j", ColumnID: 3 (j-)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 3 (j-), IndexID: 2 (t_pkey-)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 3 (j-), IndexID: 3}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 3 (j-), IndexID: 4 (t_pkey-)}
- │ │ └── PUBLIC → ABSENT ColumnName:{DescID: 104 (t), Name: "j_shadow", ColumnID: 2 (j_shadow-j+)}
- │ └── 16 Mutation operations
- │ ├── MakeWriteOnlyIndexDeleteOnly {"IndexID":3,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":1,"IndexID":3,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":2,"IndexID":3,"Kind":2,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":1,"IndexID":4,"TableID":104}
- │ ├── MakeWriteOnlyColumnDeleteOnly {"ColumnID":3,"TableID":104}
- │ ├── SetColumnName {"ColumnID":3,"Name":"crdb_internal_co...","TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":3,"IndexID":3,"Kind":2,"Ordinal":1,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":3,"IndexID":4,"Kind":2,"TableID":104}
- │ ├── SetColumnName {"ColumnID":2,"Name":"j","TableID":104}
- │ ├── MakeWriteOnlyIndexDeleteOnly {"IndexID":2,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":1,"IndexID":2,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":2,"IndexID":2,"Kind":2,"TableID":104}
- │ ├── MakeIndexAbsent {"IndexID":4,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":3,"IndexID":2,"Kind":2,"Ordinal":1,"TableID":104}
- │ ├── SetJobStateOnDescriptor {"DescriptorID":104}
- │ └── UpdateSchemaChangerJob {"IsNonCancelable":true,"RunningStatus":"PostCommitNonRev..."}
- └── Stage 2 of 2 in PostCommitNonRevertiblePhase
- ├── 8 elements transitioning toward ABSENT
- │ ├── DELETE_ONLY → ABSENT PrimaryIndex:{DescID: 104 (t), IndexID: 2 (t_pkey-), ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1 (t_pkey+)}
- │ ├── PUBLIC → ABSENT IndexData:{DescID: 104 (t), IndexID: 2 (t_pkey-)}
- │ ├── DELETE_ONLY → ABSENT TemporaryIndex:{DescID: 104 (t), IndexID: 3, ConstraintID: 3, SourceIndexID: 1 (t_pkey+)}
- │ ├── PUBLIC → ABSENT IndexData:{DescID: 104 (t), IndexID: 3}
- │ ├── PUBLIC → ABSENT IndexData:{DescID: 104 (t), IndexID: 4 (t_pkey-)}
- │ ├── DELETE_ONLY → ABSENT Column:{DescID: 104 (t), ColumnID: 3 (j-)}
- │ ├── PUBLIC → ABSENT ColumnType:{DescID: 104 (t), ColumnFamilyID: 0 (primary), ColumnID: 3 (j-), TypeName: "INT8"}
- │ └── PUBLIC → ABSENT ColumnComputeExpression:{DescID: 104 (t), ColumnID: 3 (j-)}
- └── 9 Mutation operations
- ├── MakeIndexAbsent {"IndexID":2,"TableID":104}
- ├── CreateGCJobForIndex {"IndexID":2,"TableID":104}
- ├── MakeIndexAbsent {"IndexID":3,"TableID":104}
- ├── CreateGCJobForIndex {"IndexID":3,"TableID":104}
- ├── CreateGCJobForIndex {"IndexID":4,"TableID":104}
- ├── RemoveColumnComputeExpression {"ColumnID":3,"TableID":104}
- ├── MakeDeleteOnlyColumnAbsent {"ColumnID":3,"TableID":104}
- ├── RemoveJobStateFromDescriptor {"DescriptorID":104}
- └── UpdateSchemaChangerJob {"IsNonCancelable":true,"RunningStatus":"all stages compl..."}
diff --git a/pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_column_type_general/alter_table_alter_column_type_general__rollback_7_of_15.explain b/pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_column_type_general/alter_table_alter_column_type_general__rollback_7_of_15.explain
deleted file mode 100644
index a2049d1b2092..000000000000
--- a/pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_column_type_general/alter_table_alter_column_type_general__rollback_7_of_15.explain
+++ /dev/null
@@ -1,62 +0,0 @@
-/* setup */
-CREATE TABLE t (i INT PRIMARY KEY, j TEXT);
-SET enable_experimental_alter_column_type_general=TRUE;
-INSERT INTO t VALUES (1,NULL),(2,'1'),(3,'2');
-
-/* test */
-ALTER TABLE t ALTER COLUMN j SET DATA TYPE BIGINT USING j::BIGINT;
-EXPLAIN (DDL) rollback at post-commit stage 7 of 15;
-----
-Schema change plan for rolling back ALTER TABLE ‹defaultdb›.public.‹t› ALTER COLUMN ‹j› SET DATA TYPE INT8 USING ‹j›::INT8;
- └── PostCommitNonRevertiblePhase
- ├── Stage 1 of 2 in PostCommitNonRevertiblePhase
- │ ├── 14 elements transitioning toward ABSENT
- │ │ ├── WRITE_ONLY → DELETE_ONLY PrimaryIndex:{DescID: 104 (t), IndexID: 2 (t_pkey-), ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1 (t_pkey+)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 1 (i), IndexID: 2 (t_pkey-)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 2 (j_shadow-j+), IndexID: 2 (t_pkey-)}
- │ │ ├── TRANSIENT_DELETE_ONLY → ABSENT TemporaryIndex:{DescID: 104 (t), IndexID: 3, ConstraintID: 3, SourceIndexID: 1 (t_pkey+)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 1 (i), IndexID: 3}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 2 (j_shadow-j+), IndexID: 3}
- │ │ ├── BACKFILL_ONLY → ABSENT PrimaryIndex:{DescID: 104 (t), IndexID: 4 (t_pkey-), ConstraintID: 4, TemporaryIndexID: 5, SourceIndexID: 2 (t_pkey-)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 1 (i), IndexID: 4 (t_pkey-)}
- │ │ ├── WRITE_ONLY → DELETE_ONLY Column:{DescID: 104 (t), ColumnID: 3 (j-)}
- │ │ ├── PUBLIC → ABSENT ColumnName:{DescID: 104 (t), Name: "j", ColumnID: 3 (j-)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 3 (j-), IndexID: 2 (t_pkey-)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 3 (j-), IndexID: 3}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 3 (j-), IndexID: 4 (t_pkey-)}
- │ │ └── PUBLIC → ABSENT ColumnName:{DescID: 104 (t), Name: "j_shadow", ColumnID: 2 (j_shadow-j+)}
- │ └── 16 Mutation operations
- │ ├── MakeWriteOnlyIndexDeleteOnly {"IndexID":2,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":1,"IndexID":2,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":2,"IndexID":2,"Kind":2,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":1,"IndexID":3,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":2,"IndexID":3,"Kind":2,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":1,"IndexID":4,"TableID":104}
- │ ├── MakeWriteOnlyColumnDeleteOnly {"ColumnID":3,"TableID":104}
- │ ├── SetColumnName {"ColumnID":3,"Name":"crdb_internal_co...","TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":3,"IndexID":2,"Kind":2,"Ordinal":1,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":3,"IndexID":3,"Kind":2,"Ordinal":1,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":3,"IndexID":4,"Kind":2,"TableID":104}
- │ ├── SetColumnName {"ColumnID":2,"Name":"j","TableID":104}
- │ ├── MakeIndexAbsent {"IndexID":3,"TableID":104}
- │ ├── MakeIndexAbsent {"IndexID":4,"TableID":104}
- │ ├── SetJobStateOnDescriptor {"DescriptorID":104}
- │ └── UpdateSchemaChangerJob {"IsNonCancelable":true,"RunningStatus":"PostCommitNonRev..."}
- └── Stage 2 of 2 in PostCommitNonRevertiblePhase
- ├── 7 elements transitioning toward ABSENT
- │ ├── DELETE_ONLY → ABSENT PrimaryIndex:{DescID: 104 (t), IndexID: 2 (t_pkey-), ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1 (t_pkey+)}
- │ ├── PUBLIC → ABSENT IndexData:{DescID: 104 (t), IndexID: 2 (t_pkey-)}
- │ ├── PUBLIC → ABSENT IndexData:{DescID: 104 (t), IndexID: 3}
- │ ├── PUBLIC → ABSENT IndexData:{DescID: 104 (t), IndexID: 4 (t_pkey-)}
- │ ├── DELETE_ONLY → ABSENT Column:{DescID: 104 (t), ColumnID: 3 (j-)}
- │ ├── PUBLIC → ABSENT ColumnType:{DescID: 104 (t), ColumnFamilyID: 0 (primary), ColumnID: 3 (j-), TypeName: "INT8"}
- │ └── PUBLIC → ABSENT ColumnComputeExpression:{DescID: 104 (t), ColumnID: 3 (j-)}
- └── 8 Mutation operations
- ├── MakeIndexAbsent {"IndexID":2,"TableID":104}
- ├── CreateGCJobForIndex {"IndexID":2,"TableID":104}
- ├── CreateGCJobForIndex {"IndexID":3,"TableID":104}
- ├── CreateGCJobForIndex {"IndexID":4,"TableID":104}
- ├── RemoveColumnComputeExpression {"ColumnID":3,"TableID":104}
- ├── MakeDeleteOnlyColumnAbsent {"ColumnID":3,"TableID":104}
- ├── RemoveJobStateFromDescriptor {"DescriptorID":104}
- └── UpdateSchemaChangerJob {"IsNonCancelable":true,"RunningStatus":"all stages compl..."}
diff --git a/pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_column_type_general/alter_table_alter_column_type_general__rollback_8_of_15.explain b/pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_column_type_general/alter_table_alter_column_type_general__rollback_8_of_15.explain
deleted file mode 100644
index d609f44b088b..000000000000
--- a/pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_column_type_general/alter_table_alter_column_type_general__rollback_8_of_15.explain
+++ /dev/null
@@ -1,62 +0,0 @@
-/* setup */
-CREATE TABLE t (i INT PRIMARY KEY, j TEXT);
-SET enable_experimental_alter_column_type_general=TRUE;
-INSERT INTO t VALUES (1,NULL),(2,'1'),(3,'2');
-
-/* test */
-ALTER TABLE t ALTER COLUMN j SET DATA TYPE BIGINT USING j::BIGINT;
-EXPLAIN (DDL) rollback at post-commit stage 8 of 15;
-----
-Schema change plan for rolling back ALTER TABLE ‹defaultdb›.public.‹t› ALTER COLUMN ‹j› SET DATA TYPE INT8 USING ‹j›::INT8;
- └── PostCommitNonRevertiblePhase
- ├── Stage 1 of 2 in PostCommitNonRevertiblePhase
- │ ├── 14 elements transitioning toward ABSENT
- │ │ ├── WRITE_ONLY → DELETE_ONLY PrimaryIndex:{DescID: 104 (t), IndexID: 2 (t_pkey-), ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1 (t_pkey+)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 1 (i), IndexID: 2 (t_pkey-)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 2 (j_shadow-j+), IndexID: 2 (t_pkey-)}
- │ │ ├── TRANSIENT_DELETE_ONLY → ABSENT TemporaryIndex:{DescID: 104 (t), IndexID: 3, ConstraintID: 3, SourceIndexID: 1 (t_pkey+)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 1 (i), IndexID: 3}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 2 (j_shadow-j+), IndexID: 3}
- │ │ ├── BACKFILL_ONLY → ABSENT PrimaryIndex:{DescID: 104 (t), IndexID: 4 (t_pkey-), ConstraintID: 4, TemporaryIndexID: 5, SourceIndexID: 2 (t_pkey-)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 1 (i), IndexID: 4 (t_pkey-)}
- │ │ ├── WRITE_ONLY → DELETE_ONLY Column:{DescID: 104 (t), ColumnID: 3 (j-)}
- │ │ ├── PUBLIC → ABSENT ColumnName:{DescID: 104 (t), Name: "j", ColumnID: 3 (j-)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 3 (j-), IndexID: 2 (t_pkey-)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 3 (j-), IndexID: 3}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 3 (j-), IndexID: 4 (t_pkey-)}
- │ │ └── PUBLIC → ABSENT ColumnName:{DescID: 104 (t), Name: "j_shadow", ColumnID: 2 (j_shadow-j+)}
- │ └── 16 Mutation operations
- │ ├── MakeWriteOnlyIndexDeleteOnly {"IndexID":2,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":1,"IndexID":2,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":2,"IndexID":2,"Kind":2,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":1,"IndexID":3,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":2,"IndexID":3,"Kind":2,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":1,"IndexID":4,"TableID":104}
- │ ├── MakeWriteOnlyColumnDeleteOnly {"ColumnID":3,"TableID":104}
- │ ├── SetColumnName {"ColumnID":3,"Name":"crdb_internal_co...","TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":3,"IndexID":2,"Kind":2,"Ordinal":1,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":3,"IndexID":3,"Kind":2,"Ordinal":1,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":3,"IndexID":4,"Kind":2,"TableID":104}
- │ ├── SetColumnName {"ColumnID":2,"Name":"j","TableID":104}
- │ ├── MakeIndexAbsent {"IndexID":3,"TableID":104}
- │ ├── MakeIndexAbsent {"IndexID":4,"TableID":104}
- │ ├── SetJobStateOnDescriptor {"DescriptorID":104}
- │ └── UpdateSchemaChangerJob {"IsNonCancelable":true,"RunningStatus":"PostCommitNonRev..."}
- └── Stage 2 of 2 in PostCommitNonRevertiblePhase
- ├── 7 elements transitioning toward ABSENT
- │ ├── DELETE_ONLY → ABSENT PrimaryIndex:{DescID: 104 (t), IndexID: 2 (t_pkey-), ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1 (t_pkey+)}
- │ ├── PUBLIC → ABSENT IndexData:{DescID: 104 (t), IndexID: 2 (t_pkey-)}
- │ ├── PUBLIC → ABSENT IndexData:{DescID: 104 (t), IndexID: 3}
- │ ├── PUBLIC → ABSENT IndexData:{DescID: 104 (t), IndexID: 4 (t_pkey-)}
- │ ├── DELETE_ONLY → ABSENT Column:{DescID: 104 (t), ColumnID: 3 (j-)}
- │ ├── PUBLIC → ABSENT ColumnType:{DescID: 104 (t), ColumnFamilyID: 0 (primary), ColumnID: 3 (j-), TypeName: "INT8"}
- │ └── PUBLIC → ABSENT ColumnComputeExpression:{DescID: 104 (t), ColumnID: 3 (j-)}
- └── 8 Mutation operations
- ├── MakeIndexAbsent {"IndexID":2,"TableID":104}
- ├── CreateGCJobForIndex {"IndexID":2,"TableID":104}
- ├── CreateGCJobForIndex {"IndexID":3,"TableID":104}
- ├── CreateGCJobForIndex {"IndexID":4,"TableID":104}
- ├── RemoveColumnComputeExpression {"ColumnID":3,"TableID":104}
- ├── MakeDeleteOnlyColumnAbsent {"ColumnID":3,"TableID":104}
- ├── RemoveJobStateFromDescriptor {"DescriptorID":104}
- └── UpdateSchemaChangerJob {"IsNonCancelable":true,"RunningStatus":"all stages compl..."}
diff --git a/pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_column_type_general/alter_table_alter_column_type_general__rollback_9_of_15.explain b/pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_column_type_general/alter_table_alter_column_type_general__rollback_9_of_15.explain
deleted file mode 100644
index d3a68f70529f..000000000000
--- a/pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_column_type_general/alter_table_alter_column_type_general__rollback_9_of_15.explain
+++ /dev/null
@@ -1,82 +0,0 @@
-/* setup */
-CREATE TABLE t (i INT PRIMARY KEY, j TEXT);
-SET enable_experimental_alter_column_type_general=TRUE;
-INSERT INTO t VALUES (1,NULL),(2,'1'),(3,'2');
-
-/* test */
-ALTER TABLE t ALTER COLUMN j SET DATA TYPE BIGINT USING j::BIGINT;
-EXPLAIN (DDL) rollback at post-commit stage 9 of 15;
-----
-Schema change plan for rolling back ALTER TABLE ‹defaultdb›.public.‹t› ALTER COLUMN ‹j› SET DATA TYPE INT8 USING ‹j›::INT8;
- └── PostCommitNonRevertiblePhase
- ├── Stage 1 of 3 in PostCommitNonRevertiblePhase
- │ ├── 2 elements transitioning toward PUBLIC
- │ │ ├── VALIDATED → PUBLIC PrimaryIndex:{DescID: 104 (t), IndexID: 1 (t_pkey+), ConstraintID: 1}
- │ │ └── ABSENT → PUBLIC IndexName:{DescID: 104 (t), Name: "t_pkey", IndexID: 1 (t_pkey+)}
- │ ├── 15 elements transitioning toward ABSENT
- │ │ ├── PUBLIC → VALIDATED PrimaryIndex:{DescID: 104 (t), IndexID: 2 (t_pkey-), ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1 (t_pkey+)}
- │ │ ├── PUBLIC → ABSENT IndexName:{DescID: 104 (t), Name: "t_pkey", IndexID: 2 (t_pkey-)}
- │ │ ├── TRANSIENT_DELETE_ONLY → ABSENT TemporaryIndex:{DescID: 104 (t), IndexID: 3, ConstraintID: 3, SourceIndexID: 1 (t_pkey+)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 1 (i), IndexID: 3}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 2 (j_shadow-j+), IndexID: 3}
- │ │ ├── BACKFILL_ONLY → ABSENT PrimaryIndex:{DescID: 104 (t), IndexID: 4 (t_pkey-), ConstraintID: 4, TemporaryIndexID: 5, SourceIndexID: 2 (t_pkey-)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 1 (i), IndexID: 4 (t_pkey-)}
- │ │ ├── DELETE_ONLY → ABSENT TemporaryIndex:{DescID: 104 (t), IndexID: 5, ConstraintID: 5, SourceIndexID: 2 (t_pkey-)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 1 (i), IndexID: 5}
- │ │ ├── WRITE_ONLY → DELETE_ONLY Column:{DescID: 104 (t), ColumnID: 3 (j-)}
- │ │ ├── PUBLIC → ABSENT ColumnName:{DescID: 104 (t), Name: "j", ColumnID: 3 (j-)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 3 (j-), IndexID: 3}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 3 (j-), IndexID: 4 (t_pkey-)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 3 (j-), IndexID: 5}
- │ │ └── PUBLIC → ABSENT ColumnName:{DescID: 104 (t), Name: "j_shadow", ColumnID: 2 (j_shadow-j+)}
- │ └── 19 Mutation operations
- │ ├── SetIndexName {"IndexID":1,"Name":"t_pkey","TableID":104}
- │ ├── MakePublicPrimaryIndexWriteOnly {"IndexID":2,"TableID":104}
- │ ├── SetIndexName {"IndexID":2,"Name":"crdb_internal_in...","TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":1,"IndexID":3,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":2,"IndexID":3,"Kind":2,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":1,"IndexID":4,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":1,"IndexID":5,"TableID":104}
- │ ├── MakeWriteOnlyColumnDeleteOnly {"ColumnID":3,"TableID":104}
- │ ├── SetColumnName {"ColumnID":3,"Name":"crdb_internal_co...","TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":3,"IndexID":3,"Kind":2,"Ordinal":1,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":3,"IndexID":4,"Kind":2,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":3,"IndexID":5,"Kind":2,"TableID":104}
- │ ├── SetColumnName {"ColumnID":2,"Name":"j","TableID":104}
- │ ├── MakeValidatedPrimaryIndexPublic {"IndexID":1,"TableID":104}
- │ ├── MakeIndexAbsent {"IndexID":3,"TableID":104}
- │ ├── MakeIndexAbsent {"IndexID":4,"TableID":104}
- │ ├── MakeIndexAbsent {"IndexID":5,"TableID":104}
- │ ├── SetJobStateOnDescriptor {"DescriptorID":104}
- │ └── UpdateSchemaChangerJob {"IsNonCancelable":true,"RunningStatus":"PostCommitNonRev..."}
- ├── Stage 2 of 3 in PostCommitNonRevertiblePhase
- │ ├── 4 elements transitioning toward ABSENT
- │ │ ├── VALIDATED → DELETE_ONLY PrimaryIndex:{DescID: 104 (t), IndexID: 2 (t_pkey-), ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1 (t_pkey+)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 1 (i), IndexID: 2 (t_pkey-)}
- │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 2 (j_shadow-j+), IndexID: 2 (t_pkey-)}
- │ │ └── PUBLIC → ABSENT IndexColumn:{DescID: 104 (t), ColumnID: 3 (j-), IndexID: 2 (t_pkey-)}
- │ └── 6 Mutation operations
- │ ├── MakeWriteOnlyIndexDeleteOnly {"IndexID":2,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":1,"IndexID":2,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":2,"IndexID":2,"Kind":2,"TableID":104}
- │ ├── RemoveColumnFromIndex {"ColumnID":3,"IndexID":2,"Kind":2,"Ordinal":1,"TableID":104}
- │ ├── SetJobStateOnDescriptor {"DescriptorID":104}
- │ └── UpdateSchemaChangerJob {"IsNonCancelable":true,"RunningStatus":"PostCommitNonRev..."}
- └── Stage 3 of 3 in PostCommitNonRevertiblePhase
- ├── 7 elements transitioning toward ABSENT
- │ ├── DELETE_ONLY → ABSENT PrimaryIndex:{DescID: 104 (t), IndexID: 2 (t_pkey-), ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1 (t_pkey+)}
- │ ├── PUBLIC → ABSENT IndexData:{DescID: 104 (t), IndexID: 2 (t_pkey-)}
- │ ├── PUBLIC → ABSENT IndexData:{DescID: 104 (t), IndexID: 3}
- │ ├── PUBLIC → ABSENT IndexData:{DescID: 104 (t), IndexID: 4 (t_pkey-)}
- │ ├── DELETE_ONLY → ABSENT Column:{DescID: 104 (t), ColumnID: 3 (j-)}
- │ ├── PUBLIC → ABSENT ColumnType:{DescID: 104 (t), ColumnFamilyID: 0 (primary), ColumnID: 3 (j-), TypeName: "INT8"}
- │ └── PUBLIC → ABSENT ColumnComputeExpression:{DescID: 104 (t), ColumnID: 3 (j-)}
- └── 8 Mutation operations
- ├── MakeIndexAbsent {"IndexID":2,"TableID":104}
- ├── CreateGCJobForIndex {"IndexID":2,"TableID":104}
- ├── CreateGCJobForIndex {"IndexID":3,"TableID":104}
- ├── CreateGCJobForIndex {"IndexID":4,"TableID":104}
- ├── RemoveColumnComputeExpression {"ColumnID":3,"TableID":104}
- ├── MakeDeleteOnlyColumnAbsent {"ColumnID":3,"TableID":104}
- ├── RemoveJobStateFromDescriptor {"DescriptorID":104}
- └── UpdateSchemaChangerJob {"IsNonCancelable":true,"RunningStatus":"all stages compl..."}
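The five `.explain` fixtures deleted above all record the declarative schema changer's rollback plan for the same ALTER COLUMN TYPE statement, varying only the post-commit stage at which the rollback begins. A hedged, self-contained sketch of how to reproduce such a plan interactively (plain `database/sql`; the DSN and a running local cluster are assumptions):

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/lib/pq"
)

func main() {
	// Hypothetical single-node cluster; adjust the DSN as needed.
	db, err := sql.Open("postgres", "postgresql://root@localhost:26257/defaultdb?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	for _, stmt := range []string{
		`CREATE TABLE t (i INT PRIMARY KEY, j TEXT)`,
		`SET enable_experimental_alter_column_type_general = true`,
	} {
		if _, err := db.Exec(stmt); err != nil {
			log.Fatal(err)
		}
	}

	// EXPLAIN (DDL) prints the staged plan without executing the change.
	rows, err := db.Query(`EXPLAIN (DDL) ALTER TABLE t ALTER COLUMN j SET DATA TYPE BIGINT USING j::BIGINT`)
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()
	for rows.Next() {
		var line string
		if err := rows.Scan(&line); err != nil {
			log.Fatal(err)
		}
		fmt.Println(line)
	}
	if err := rows.Err(); err != nil {
		log.Fatal(err)
	}
}
```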
diff --git a/pkg/sql/sem/builtins/builtins.go b/pkg/sql/sem/builtins/builtins.go
index a1f19fb50322..ea5f6d9afb24 100644
--- a/pkg/sql/sem/builtins/builtins.go
+++ b/pkg/sql/sem/builtins/builtins.go
@@ -5850,54 +5850,6 @@ SELECT
},
),
- // Fetches the corresponding lease_holder for the request key. If an error
- // occurs, the query still succeeds and the error is included in the output.
- "crdb_internal.lease_holder_with_errors": makeBuiltin(
- tree.FunctionProperties{
- Category: builtinconstants.CategorySystemInfo,
- },
- tree.Overload{
- Types: tree.ParamTypes{{Name: "key", Typ: types.Bytes}},
- ReturnType: tree.FixedReturnType(types.Jsonb),
- Fn: func(ctx context.Context, evalCtx *eval.Context, args tree.Datums) (tree.Datum, error) {
- if evalCtx.Txn == nil { // can occur during backfills
- return nil, pgerror.Newf(pgcode.FeatureNotSupported,
- "cannot use crdb_internal.lease_holder_with_errors in this context")
- }
- key := []byte(tree.MustBeDBytes(args[0]))
- b := evalCtx.Txn.DB().NewBatch()
- b.AddRawRequest(&kvpb.LeaseInfoRequest{
- RequestHeader: kvpb.RequestHeader{
- Key: key,
- },
- })
- type leaseholderAndError struct {
- Leaseholder roachpb.StoreID
- Error string
- }
- lhae := &leaseholderAndError{}
- if err := evalCtx.Txn.DB().Run(ctx, b); err != nil {
- lhae.Error = err.Error()
- } else {
- resp := b.RawResponse().Responses[0].GetInner().(*kvpb.LeaseInfoResponse)
- lhae.Leaseholder = resp.Lease.Replica.StoreID
- }
-
- jsonStr, err := gojson.Marshal(lhae)
- if err != nil {
- return nil, err
- }
- jsonDatum, err := tree.ParseDJSON(string(jsonStr))
- if err != nil {
- return nil, err
- }
- return jsonDatum, nil
- },
- Info: "This function is used to fetch the leaseholder corresponding to a request key",
- Volatility: volatility.Volatile,
- },
- ),
-
"crdb_internal.trim_tenant_prefix": makeBuiltin(
tree.FunctionProperties{
Category: builtinconstants.CategoryMultiTenancy,
@@ -6205,27 +6157,6 @@ SELECT
},
),
- // Return statistics about a range.
- "crdb_internal.range_stats_with_errors": makeBuiltin(
- tree.FunctionProperties{
- Category: builtinconstants.CategorySystemInfo,
- },
- tree.Overload{
- Types: tree.ParamTypes{
- {Name: "key", Typ: types.Bytes},
- },
- SpecializedVecBuiltin: tree.CrdbInternalRangeStatsWithErrors,
- ReturnType: tree.FixedReturnType(types.Jsonb),
- Fn: func(ctx context.Context, evalCtx *eval.Context, args tree.Datums) (tree.Datum, error) {
- // This function is a placeholder and will never be called because
- // CrdbInternalRangeStatsWithErrors overrides it.
- return tree.DNull, nil
- },
- Info: "This function is used to retrieve range statistics information as a JSON object.",
- Volatility: volatility.Volatile,
- },
- ),
-
// Returns a namespace_id based on parentID and a given name.
// Allows a non-admin to query the system.namespace table, but performs
// the relevant permission checks to ensure secure access.
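Both removed builtins followed the same "carry the error in the result" shape: when the KV lookup failed, the error was marshaled into the returned JSON rather than failing the statement, so consumers such as crdb_internal.ranges could keep iterating over healthy ranges. A dependency-free sketch of that pattern (hypothetical names; not the CockroachDB implementation):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// leaseholderAndError mirrors the struct in the removed builtin: either a
// leaseholder store ID or the error encountered while fetching it.
type leaseholderAndError struct {
	Leaseholder int    `json:"Leaseholder"`
	Error       string `json:"Error,omitempty"`
}

func describe(leaseholder int, lookupErr error) (string, error) {
	lhae := leaseholderAndError{Leaseholder: leaseholder}
	if lookupErr != nil {
		// Embed the failure in the payload instead of failing the query.
		lhae.Error = lookupErr.Error()
	}
	out, err := json.Marshal(lhae)
	return string(out), err
}

func main() {
	ok, _ := describe(1, nil)
	bad, _ := describe(0, fmt.Errorf("replica unavailable"))
	fmt.Println(ok)  // {"Leaseholder":1}
	fmt.Println(bad) // {"Leaseholder":0,"Error":"replica unavailable"}
}
```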
diff --git a/pkg/sql/sem/builtins/fixed_oids.go b/pkg/sql/sem/builtins/fixed_oids.go
index 6b507e59725f..110ee238a5ff 100644
--- a/pkg/sql/sem/builtins/fixed_oids.go
+++ b/pkg/sql/sem/builtins/fixed_oids.go
@@ -2604,8 +2604,6 @@ var builtinOidsArray = []string{
2641: `crdb_internal.clear_table_stats_cache() -> void`,
2642: `crdb_internal.get_fully_qualified_table_name(table_descriptor_id: int) -> string`,
2643: `crdb_internal.type_is_indexable(oid: oid) -> bool`,
- 2644: `crdb_internal.range_stats_with_errors(key: bytes) -> jsonb`,
- 2645: `crdb_internal.lease_holder_with_errors(key: bytes) -> jsonb`,
}
var builtinOidsBySignature map[string]oid.Oid
diff --git a/pkg/sql/sem/tree/constant.go b/pkg/sql/sem/tree/constant.go
index 2256369fb9b8..12054c943c12 100644
--- a/pkg/sql/sem/tree/constant.go
+++ b/pkg/sql/sem/tree/constant.go
@@ -507,6 +507,7 @@ var (
// default type that raw strings get parsed into, without any casts or type
// assertions.
types.String,
+ types.AnyCollatedString,
types.Bytes,
types.Bool,
types.Int,
diff --git a/pkg/sql/sem/tree/constant_test.go b/pkg/sql/sem/tree/constant_test.go
index 10d87000da22..d048cda345d9 100644
--- a/pkg/sql/sem/tree/constant_test.go
+++ b/pkg/sql/sem/tree/constant_test.go
@@ -44,6 +44,12 @@ func TestAvailTypesAreSets(t *testing.T) {
for i, test := range testCases {
seen := make(map[oid.Oid]struct{})
for _, newType := range test.availTypes {
+ // Collated strings have the same Oid as uncollated strings, but we need the
+ // ability to parse constants as collated strings when that is the desired
+ // type.
+ if newType.Family() == types.CollatedStringFamily {
+ continue
+ }
if _, ok := seen[newType.Oid()]; ok {
t.Errorf("%d: found duplicate type: %v", i, newType)
}
@@ -214,6 +220,13 @@ func TestStringConstantVerifyAvailableTypes(t *testing.T) {
continue
}
+ // The collated string value in c.AvailableTypes() is AnyCollatedString, so we
+ // will not be able to resolve that exact type. In actual execution, the constant
+ // would be resolved with an actual desired locale.
+ if availType.Family() == types.CollatedStringFamily {
+ continue
+ }
+
semaCtx := tree.MakeSemaContext(nil /* resolver */)
if _, err := test.c.ResolveAsType(context.Background(), &semaCtx, availType); err != nil {
if !strings.Contains(err.Error(), "could not parse") &&
@@ -680,6 +693,13 @@ func TestStringConstantResolveAvailableTypes(t *testing.T) {
continue
}
+ // The collated string value in c.AvailableTypes() is AnyCollatedString, so we
+ // will not be able to resolve that exact type. In actual execution, the constant
+ // would be resolved with an actual desired locale.
+ if availType.Family() == types.CollatedStringFamily {
+ continue
+ }
+
semaCtx := tree.MakeSemaContext(nil /* resolver */)
typedExpr, err := test.c.ResolveAsType(ctx, &semaCtx, availType)
var res tree.Datum
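The user-visible effect of admitting AnyCollatedString as an available type is that a bare string literal can type-check directly against a collated-string context; the concrete locale is only supplied at resolution time, which is exactly why the tests above must skip the wildcard type. A hedged end-to-end sketch (a running cluster and the DSN are assumptions):

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/lib/pq"
)

func main() {
	db, err := sql.Open("postgres", "postgresql://root@localhost:26257/defaultdb?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// The untyped constant 'foo' resolves as a collated string once a
	// concrete locale is known from context.
	var s string
	if err := db.QueryRow(`SELECT 'foo' COLLATE "en_US"`).Scan(&s); err != nil {
		log.Fatal(err)
	}
	fmt.Println(s) // foo
}
```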
diff --git a/pkg/sql/sem/tree/overload.go b/pkg/sql/sem/tree/overload.go
index f5c1131b1803..2a212286fa76 100644
--- a/pkg/sql/sem/tree/overload.go
+++ b/pkg/sql/sem/tree/overload.go
@@ -38,7 +38,6 @@ const (
_ SpecializedVectorizedBuiltin = iota
SubstringStringIntInt
CrdbInternalRangeStats
- CrdbInternalRangeStatsWithErrors
)
// AggregateOverload is an opaque type which is used to box an eval.AggregateOverload.
diff --git a/pkg/sql/sessiondatapb/local_only_session_data.proto b/pkg/sql/sessiondatapb/local_only_session_data.proto
index 76b8103522da..e94c25c465c3 100644
--- a/pkg/sql/sessiondatapb/local_only_session_data.proto
+++ b/pkg/sql/sessiondatapb/local_only_session_data.proto
@@ -548,6 +548,9 @@ message LocalOnlySessionData {
// written in this session were originally written with before being
// replicated via Logical Data Replication.
util.hlc.Timestamp origin_timestamp_for_logical_data_replication = 140 [(gogoproto.nullable) = false];
+ // BypassPCRReaderCatalogAOST disables the AOST used by all user queries on
+ // the PCR reader catalog.
+ bool bypass_pcr_reader_catalog_aost = 141 [(gogoproto.customname) = "BypassPCRReaderCatalogAOST"];
///////////////////////////////////////////////////////////////////////////
// WARNING: consider whether a session parameter you're adding needs to //
diff --git a/pkg/sql/show_ranges_test.go b/pkg/sql/show_ranges_test.go
index 0986f065f9e4..b61fbb7c6b88 100644
--- a/pkg/sql/show_ranges_test.go
+++ b/pkg/sql/show_ranges_test.go
@@ -15,6 +15,7 @@ import (
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/sql/sqltestutils"
+ "github.com/cockroachdb/cockroach/pkg/testutils/skip"
"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
"github.com/cockroachdb/cockroach/pkg/testutils/testcluster"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
@@ -80,12 +81,14 @@ FROM [SHOW RANGES FROM TABLE t WITH DETAILS]`
}
}
-// TestRangeLocalityBasedOnNodeIDs tests that the leaseholder_locality shown in
+// TestShowRangesMultipleStores tests that the leaseholder_locality shown in
// SHOW RANGES works correctly.
func TestShowRangesMultipleStores(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
+ skip.UnderRace(t, "the test is too heavy")
+
ctx := context.Background()
// NodeID=1, StoreID=1,2
@@ -116,7 +119,7 @@ func TestShowRangesMultipleStores(t *testing.T) {
)
assert.NoError(t, tc.WaitForFullReplication())
- // Scatter a system table so that the lease is unlike to be on node 1.
+ // Scatter a system table so that the lease is unlikely to be on node 1.
sqlDB := sqlutils.MakeSQLRunner(tc.Conns[0])
sqlDB.Exec(t, "ALTER TABLE system.jobs SCATTER")
// Ensure that the localities line up.
@@ -128,7 +131,7 @@ func TestShowRangesMultipleStores(t *testing.T) {
"SHOW RANGE FROM INDEX system.jobs@jobs_status_created_idx FOR ROW ('running', now(), 0)",
} {
t.Run(q, func(t *testing.T) {
- // Retry because if there's not a leaseholder, you can NULL.
+ // Retry because if there's not a leaseholder, you can get a NULL.
sqlDB.CheckQueryResultsRetry(t,
fmt.Sprintf(`
SELECT DISTINCT
@@ -231,60 +234,3 @@ func TestShowRangesWithDetails(t *testing.T) {
// val_bytes for the whole table.
require.Equal(t, valBytesPreSplit, valBytesR1+valBytesR2)
}
-
-// TestShowRangesUnavailableReplicas tests that SHOW RANGES does not return an
-// error if it encounters an unavailable range. Moreover, crdb_internal.ranges
-// includes the encountered error.
-func TestShowRangesUnavailableReplicas(t *testing.T) {
- defer leaktest.AfterTest(t)()
- defer log.Scope(t).Close(t)
-
- const numNodes = 3
- ctx := context.Background()
- tc := testcluster.StartTestCluster(
- // Manual replication will prevent the leaseholder for the unavailable range
- // from moving a different node.
- // from moving to a different node.
- )
- defer tc.Stopper().Stop(ctx)
-
- sqlDB := sqlutils.MakeSQLRunner(tc.Conns[0])
- sqlDB.Exec(t, `SET CLUSTER SETTING kv.replica_circuit_breaker.slow_replication_threshold='1s'`)
- sqlDB.Exec(t, `CREATE TABLE t (x INT PRIMARY KEY)`)
- // Split the table's range to have a better chance of moving some leaseholders
- // off of node 1 in the scatter below.
- sqlDB.Exec(t, `ALTER TABLE t SPLIT AT SELECT i FROM generate_series(0, 20) AS g(i)`)
- sqlDB.Exec(t, `ALTER TABLE t SCATTER`)
-
- // Server 0 includes the leaseholders for all system ranges, but the other two
- // are safe to stop to create some unavailable ranges that belong to table t.
- tc.StopServer(1)
- tc.StopServer(2)
-
- q := `SELECT range_id, lease_holder, range_size FROM [SHOW RANGES FROM TABLE t WITH DETAILS]`
- result := sqlDB.QueryStr(t, q)
- unavailableRangeID := ""
- // Iterate over the results to find an unavailable range.
- for _, row := range result {
- // crdb_internal.ranges powers the lease_holder and range_size fields in
- // SHOW RANGES. If a range is unavailable, the former returns NULL for both
- // fields but the latter converts the NULL leaseholder to 0.
- if row[1] == "0" {
- unavailableRangeID = row[0]
- require.Equal(t, "NULL", row[2])
- break
- }
- }
- // Ensure there is at least one unavailable range.
- require.NotEqual(t, "", unavailableRangeID)
-
- // crdb_internal.ranges also has an "errors" field that includes any errors
- // encountered while fetching the leaseholder and range stats. For the
- // unavailable range, we expect a "replica unavailable" error.
- q = fmt.Sprintf(`SELECT errors FROM crdb_internal.ranges WHERE range_id = %s`, unavailableRangeID)
- result = sqlDB.QueryStr(t, q)
- expectedError := fmt.Sprintf(
- "replica unavailable.*unable to serve request to r%s", unavailableRangeID,
- )
- require.Regexp(t, expectedError, result[0][0])
-}
diff --git a/pkg/sql/sqlerrors/errors.go b/pkg/sql/sqlerrors/errors.go
index 40fc75076ed2..bdc7cc6501be 100644
--- a/pkg/sql/sqlerrors/errors.go
+++ b/pkg/sql/sqlerrors/errors.go
@@ -307,12 +307,6 @@ func NewDependentBlocksOpError(op, objType, objName, dependentType, dependentNam
"consider dropping %q first.", dependentName)
}
-func NewAlterColTypeInCombinationNotSupportedError() error {
- return unimplemented.NewWithIssuef(
- 49351, "ALTER COLUMN TYPE cannot be used in combination "+
- "with other ALTER TABLE commands")
-}
-
const PrimaryIndexSwapDetail = `CRDB's implementation for "ADD COLUMN", "DROP COLUMN", and "ALTER PRIMARY KEY" will drop the old/current primary index and create a new one.`
// NewColumnReferencedByPrimaryKeyError is returned when attempting to drop a
diff --git a/pkg/sql/sqlstats/persistedsqlstats/provider.go b/pkg/sql/sqlstats/persistedsqlstats/provider.go
index a4b75f6e1755..cc158b8d4ef9 100644
--- a/pkg/sql/sqlstats/persistedsqlstats/provider.go
+++ b/pkg/sql/sqlstats/persistedsqlstats/provider.go
@@ -61,6 +61,12 @@ type PersistedSQLStats struct {
cfg *Config
+ // memoryPressureSignal is used by the persistedsqlstats.ApplicationStats to signal
+ // memory pressure during stats recording. A signal is emitted through this
+ // channel either if the fingerprint limit or the memory limit has been
+ // exceeded.
+ memoryPressureSignal chan struct{}
+
// Used to signal the flush completed.
flushDoneMu struct {
syncutil.Mutex
@@ -88,9 +94,10 @@ var _ sqlstats.Provider = &PersistedSQLStats{}
// New returns a new instance of the PersistedSQLStats.
func New(cfg *Config, memSQLStats *sslocal.SQLStats) *PersistedSQLStats {
p := &PersistedSQLStats{
- SQLStats: memSQLStats,
- cfg: cfg,
- drain: make(chan struct{}),
+ SQLStats: memSQLStats,
+ cfg: cfg,
+ memoryPressureSignal: make(chan struct{}),
+ drain: make(chan struct{}),
}
p.jobMonitor = jobMonitor{
@@ -168,6 +175,10 @@ func (s *PersistedSQLStats) startSQLStatsFlushLoop(ctx context.Context, stopper
select {
case <-timer.C:
timer.Read = true
+ case <-s.memoryPressureSignal:
+ // We are experiencing memory pressure, so we flush SQL stats to disk
+ // immediately, rather than waiting the full flush interval, in an
+ // attempt to relieve some of that pressure.
case <-resetIntervalChanged:
// In this case, we would restart the loop without performing any flush
// and recalculate the flush interval in the for-loop's post statement.
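The new channel turns the flush loop's fixed ticker into "flush at the interval, or as soon as memory pressure is reported, whichever comes first." A standalone sketch of the select pattern (buffered here so the demo is deterministic; the patch itself uses an unbuffered channel and simply drops the signal when no flusher is listening):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	memoryPressureSignal := make(chan struct{}, 1)

	// Recording path: emit the signal without ever blocking the writer.
	select {
	case memoryPressureSignal <- struct{}{}:
	default: // a flush is already pending; drop the duplicate signal
	}

	// Flush loop: a pressure signal is handled like an early timer expiry.
	timer := time.NewTimer(10 * time.Minute) // stand-in flush interval
	defer timer.Stop()
	select {
	case <-timer.C:
		fmt.Println("flush: interval elapsed")
	case <-memoryPressureSignal:
		fmt.Println("flush: relieving memory pressure early")
	}
}
```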
diff --git a/pkg/sql/tablewriter_upsert_opt.go b/pkg/sql/tablewriter_upsert_opt.go
index aafd0f1bc349..f622b611958e 100644
--- a/pkg/sql/tablewriter_upsert_opt.go
+++ b/pkg/sql/tablewriter_upsert_opt.go
@@ -255,17 +255,6 @@ func (tu *optTableUpserter) updateConflictingRow(
pm row.PartialIndexUpdateHelper,
traceKV bool,
) error {
- // Enforce the column constraints.
- // Note: the column constraints are already enforced for fetchRow,
- // because:
- // - for the insert part, they were checked upstream in upsertNode
- // via GenerateInsertRow().
- // - for the fetched part, we assume that the data in the table is
- // correct already.
- if err := enforceNotNullConstraints(updateValues, tu.updateCols); err != nil {
- return err
- }
-
// Queue the update in KV. This also returns an "update row"
// containing the updated values for every column in the
// table. This is useful for RETURNING, which we collect below.
diff --git a/pkg/sql/upsert.go b/pkg/sql/upsert.go
index de18bbc5f735..e7bd0fdb5e4b 100644
--- a/pkg/sql/upsert.go
+++ b/pkg/sql/upsert.go
@@ -134,8 +134,26 @@ func (n *upsertNode) BatchedNext(params runParams) (bool, error) {
// processSourceRow processes one row from the source for upsertion.
// The table writer is in charge of accumulating the result rows.
func (n *upsertNode) processSourceRow(params runParams, rowVals tree.Datums) error {
- if err := enforceNotNullConstraints(rowVals, n.run.insertCols); err != nil {
- return err
+ // Check for NOT NULL constraint violations.
+ if n.run.tw.canaryOrdinal != -1 && rowVals[n.run.tw.canaryOrdinal] != tree.DNull {
+ // When there is a canary column and its value is not NULL, then an
+ // existing row is being updated, so check only the update columns for
+ // NOT NULL constraint violations.
+ offset := len(n.run.insertCols) + len(n.run.tw.fetchCols)
+ vals := rowVals[offset : offset+len(n.run.tw.updateCols)]
+ if err := enforceNotNullConstraints(vals, n.run.tw.updateCols); err != nil {
+ return err
+ }
+ } else {
+ // Otherwise, there is no canary column (i.e., canaryOrdinal is -1,
+ // which is the case for "blind" upsert which overwrites existing rows
+ // without performing a read) or it is NULL, indicating that a new row
+ // is being inserted. In this case, check the insert columns for a NOT
+ // NULL constraint violation.
+ vals := rowVals[:len(n.run.insertCols)]
+ if err := enforceNotNullConstraints(vals, n.run.insertCols); err != nil {
+ return err
+ }
}
// Create a set of partial index IDs to not add or remove entries from.
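Together with the tablewriter_upsert_opt.go deletion above, this concentrates NOT NULL enforcement in processSourceRow, which sees the full source row. The slicing assumes the layout [insertCols | fetchCols | updateCols], with the canary value deciding which slice is checked; a toy illustration with hypothetical column counts:

```go
package main

import "fmt"

func main() {
	// A source row as processSourceRow sees it (values are placeholders):
	// three insert columns, three fetched columns, two update columns.
	insertCols, fetchCols, updateCols := 3, 3, 2
	rowVals := []string{
		"i1", "i2", "i3", // insert values
		"f1", "f2", "f3", // fetched (existing-row) values
		"u1", "u2", // update values
	}

	canaryIsNull := false // non-NULL canary => an existing row is updated
	if !canaryIsNull {
		// Update path: only the update columns are checked for NOT NULL.
		offset := insertCols + fetchCols
		fmt.Println("check:", rowVals[offset:offset+updateCols]) // [u1 u2]
	} else {
		// Insert path (or blind upsert): check the insert columns.
		fmt.Println("check:", rowVals[:insertCols]) // [i1 i2 i3]
	}
}
```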
diff --git a/pkg/sql/vars.go b/pkg/sql/vars.go
index 1e7f6de813a5..7df5054f5505 100644
--- a/pkg/sql/vars.go
+++ b/pkg/sql/vars.go
@@ -3491,6 +3491,21 @@ var varGen = map[string]sessionVar{
},
GlobalDefault: globalTrue,
},
+ // CockroachDB extension.
+ `bypass_pcr_reader_catalog_aost`: {
+ Set: func(_ context.Context, m sessionDataMutator, s string) error {
+ b, err := paramparse.ParseBoolVar("bypass_pcr_reader_catalog_aost", s)
+ if err != nil {
+ return err
+ }
+ m.SetBypassPCRReaderCatalogAOST(b)
+ return nil
+ },
+ Get: func(evalCtx *extendedEvalContext, _ *kv.Txn) (string, error) {
+ return formatBoolAsPostgresSetting(evalCtx.SessionData().BypassPCRReaderCatalogAOST), nil
+ },
+ GlobalDefault: globalFalse,
+ },
}
func ReplicationModeFromString(s string) (sessiondatapb.ReplicationMode, error) {
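The proto field above and this accessor make the knob settable per session like any other boolean variable. A hedged usage sketch (assumes a cluster with a PCR reader catalog; the DSN is illustrative):

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/lib/pq"
)

func main() {
	db, err := sql.Open("postgres", "postgresql://root@localhost:26257/defaultdb?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Disable the AOST applied to user queries on the PCR reader catalog,
	// for this session only.
	if _, err := db.Exec(`SET bypass_pcr_reader_catalog_aost = true`); err != nil {
		log.Fatal(err)
	}

	var v string
	if err := db.QueryRow(`SHOW bypass_pcr_reader_catalog_aost`).Scan(&v); err != nil {
		log.Fatal(err)
	}
	log.Printf("bypass_pcr_reader_catalog_aost = %s", v) // expected: on
}
```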
diff --git a/pkg/storage/BUILD.bazel b/pkg/storage/BUILD.bazel
index 9548e949f875..51be20522921 100644
--- a/pkg/storage/BUILD.bazel
+++ b/pkg/storage/BUILD.bazel
@@ -122,6 +122,7 @@ go_test(
"batch_test.go",
"bench_cloud_io_test.go",
"bench_data_test.go",
+ "bench_pebble_test.go",
"bench_test.go",
"disk_map_test.go",
"engine_key_test.go",
diff --git a/pkg/storage/bench_data_test.go b/pkg/storage/bench_data_test.go
index 4f8c02a6de85..69d4878d78fd 100644
--- a/pkg/storage/bench_data_test.go
+++ b/pkg/storage/bench_data_test.go
@@ -210,7 +210,7 @@ var _ initialState = mvccBenchData{}
func (d mvccBenchData) Key() []string {
key := []string{
"mvcc",
- fmt.Sprintf("cv_%s", clusterversion.PreviousRelease.Version()),
+ fmt.Sprintf("fmtver_%d", previousReleaseFormatMajorVersion),
fmt.Sprintf("numKeys_%d", d.numKeys),
fmt.Sprintf("numVersions_%d", d.numVersions),
fmt.Sprintf("valueBytes_%d", d.valueBytes),
@@ -428,7 +428,7 @@ var _ initialState = mvccImportedData{}
func (i mvccImportedData) Key() []string {
key := []string{
"mvcc",
- fmt.Sprintf("cv_%s", clusterversion.PreviousRelease.Version()),
+ fmt.Sprintf("fmtver_%d", previousReleaseFormatMajorVersion),
fmt.Sprintf("streak_%d", i.streakBound),
fmt.Sprintf("keys_%d", i.keyCount),
fmt.Sprintf("valueBytes_%d", i.valueBytes),
diff --git a/pkg/storage/bench_pebble_test.go b/pkg/storage/bench_pebble_test.go
new file mode 100644
index 000000000000..972a6c5e84d5
--- /dev/null
+++ b/pkg/storage/bench_pebble_test.go
@@ -0,0 +1,909 @@
+// Copyright 2019 The Cockroach Authors.
+//
+// Use of this software is governed by the CockroachDB Software License
+// included in the /LICENSE file.
+
+package storage
+
+import (
+ "context"
+ "fmt"
+ "math/rand"
+ "testing"
+
+ "github.com/cockroachdb/cockroach/pkg/clusterversion"
+ "github.com/cockroachdb/cockroach/pkg/kv/kvserver/concurrency/lock"
+ "github.com/cockroachdb/cockroach/pkg/roachpb"
+ "github.com/cockroachdb/cockroach/pkg/settings/cluster"
+ "github.com/cockroachdb/cockroach/pkg/storage/fs"
+ "github.com/cockroachdb/cockroach/pkg/testutils/skip"
+ "github.com/cockroachdb/cockroach/pkg/util/encoding"
+ "github.com/cockroachdb/cockroach/pkg/util/hlc"
+ "github.com/cockroachdb/cockroach/pkg/util/log"
+ "github.com/cockroachdb/cockroach/pkg/util/randutil"
+ "github.com/cockroachdb/cockroach/pkg/util/timeutil"
+ "github.com/cockroachdb/pebble"
+ "github.com/stretchr/testify/require"
+)
+
+const testCacheSize = 1 << 30 // 1 GB
+
+func setupMVCCPebble(b testing.TB, dir string) Engine {
+ peb, err := Open(
+ context.Background(),
+ fs.MustInitPhysicalTestingEnv(dir),
+ cluster.MakeTestingClusterSettings(),
+ CacheSize(testCacheSize))
+ if err != nil {
+ b.Fatalf("could not create new pebble instance at %s: %+v", dir, err)
+ }
+ return peb
+}
+
+func setupMVCCInMemPebble(b testing.TB, loc string) Engine {
+ return setupMVCCInMemPebbleWithSeparatedIntents(b)
+}
+
+func setupMVCCInMemPebbleWithSeparatedIntents(b testing.TB) Engine {
+ peb, err := Open(
+ context.Background(),
+ InMemory(),
+ cluster.MakeClusterSettings(),
+ CacheSize(testCacheSize))
+ if err != nil {
+ b.Fatalf("could not create new in-mem pebble instance: %+v", err)
+ }
+ return peb
+}
+
+func setupPebbleInMemPebbleForLatestRelease(b testing.TB, _ string) Engine {
+ ctx := context.Background()
+ s := cluster.MakeClusterSettings()
+ if err := clusterversion.Initialize(ctx, clusterversion.Latest.Version(),
+ &s.SV); err != nil {
+ b.Fatalf("failed to set current cluster version: %+v", err)
+ }
+
+ peb, err := Open(ctx, InMemory(), s, CacheSize(testCacheSize))
+ if err != nil {
+ b.Fatalf("could not create new in-mem pebble instance: %+v", err)
+ }
+ return peb
+}
+
+func BenchmarkMVCCScan_Pebble(b *testing.B) {
+ defer log.Scope(b).Close(b)
+
+ type testCase struct {
+ numRows int
+ numVersions int
+ valueSize int
+ numRangeKeys int
+ includeHeader bool
+ }
+ var testCases []testCase
+ for _, numRows := range []int{1, 10, 100, 1000, 10000, 50000} {
+ for _, numVersions := range []int{1, 2, 10, 100, 1000} {
+ for _, valueSize := range []int{8, 64, 512} {
+ for _, numRangeKeys := range []int{0, 1, 100} {
+ testCases = append(testCases, testCase{
+ numRows: numRows,
+ numVersions: numVersions,
+ valueSize: valueSize,
+ numRangeKeys: numRangeKeys,
+ })
+ }
+ }
+ }
+ }
+
+ if testing.Short() {
+ // Choose a few configurations for the short version.
+ testCases = []testCase{
+ {numRows: 1, numVersions: 1, valueSize: 8, numRangeKeys: 0},
+ {numRows: 100, numVersions: 2, valueSize: 64, numRangeKeys: 1},
+ {numRows: 1000, numVersions: 10, valueSize: 64, numRangeKeys: 100},
+ }
+ }
+
+ testCases = append(testCases, testCase{
+ numRows: 1000,
+ numVersions: 2,
+ valueSize: 64,
+ numRangeKeys: 0,
+ includeHeader: true,
+ })
+
+ for _, tc := range testCases {
+ name := fmt.Sprintf(
+ "rows=%d/versions=%d/valueSize=%d/numRangeKeys=%d/headers=%v",
+ tc.numRows, tc.numVersions, tc.valueSize, tc.numRangeKeys, tc.includeHeader,
+ )
+ b.Run(name, func(b *testing.B) {
+ ctx := context.Background()
+ runMVCCScan(ctx, b, benchScanOptions{
+ mvccBenchData: mvccBenchData{
+ numVersions: tc.numVersions,
+ valueBytes: tc.valueSize,
+ numRangeKeys: tc.numRangeKeys,
+ includeHeader: tc.includeHeader,
+ },
+ numRows: tc.numRows,
+ reverse: false,
+ })
+ })
+ }
+}
+
+func BenchmarkMVCCScanGarbage_Pebble(b *testing.B) {
+ defer log.Scope(b).Close(b)
+
+ type testCase struct {
+ numRows int
+ numVersions int
+ numRangeKeys int
+ tombstones bool
+ }
+ var testCases []testCase
+ for _, numRows := range []int{1, 10, 100, 1000, 10000, 50000} {
+ for _, numVersions := range []int{1, 2, 10, 100, 1000} {
+ for _, numRangeKeys := range []int{0, 1, 100} {
+ for _, tombstones := range []bool{false, true} {
+ testCases = append(testCases, testCase{
+ numRows: numRows,
+ numVersions: numVersions,
+ numRangeKeys: numRangeKeys,
+ tombstones: tombstones,
+ })
+ }
+ }
+ }
+ }
+
+ if testing.Short() {
+ // Choose a few configurations for the short version.
+ testCases = []testCase{
+ {numRows: 1, numVersions: 1, numRangeKeys: 0, tombstones: false},
+ {numRows: 10, numVersions: 2, numRangeKeys: 1, tombstones: true},
+ {numRows: 1000, numVersions: 10, numRangeKeys: 100, tombstones: true},
+ }
+ }
+
+ for _, tc := range testCases {
+ name := fmt.Sprintf(
+ "rows=%d/versions=%d/numRangeKeys=%d/tombstones=%t",
+ tc.numRows, tc.numVersions, tc.numRangeKeys, tc.tombstones,
+ )
+ b.Run(name, func(b *testing.B) {
+ ctx := context.Background()
+ runMVCCScan(ctx, b, benchScanOptions{
+ mvccBenchData: mvccBenchData{
+ numVersions: tc.numVersions,
+ numRangeKeys: tc.numRangeKeys,
+ garbage: true,
+ },
+ numRows: tc.numRows,
+ tombstones: tc.tombstones,
+ reverse: false,
+ })
+ })
+ }
+}
+
+func BenchmarkMVCCScanSQLRows_Pebble(b *testing.B) {
+ defer log.Scope(b).Close(b)
+
+ type testCase struct {
+ numRows int
+ numColumnFamilies int
+ numVersions int
+ valueSize int
+ wholeRows bool
+ }
+ var testCases []testCase
+ for _, numRows := range []int{1, 10, 100, 1000, 10000} {
+ for _, numColumnFamilies := range []int{1, 3, 10} {
+ for _, numVersions := range []int{1} {
+ for _, valueSize := range []int{8, 64, 512} {
+ for _, wholeRows := range []bool{false, true} {
+ testCases = append(testCases, testCase{
+ numRows: numRows,
+ numColumnFamilies: numColumnFamilies,
+ numVersions: numVersions,
+ valueSize: valueSize,
+ wholeRows: wholeRows,
+ })
+ }
+ }
+ }
+ }
+ }
+
+ if testing.Short() {
+ // Choose a few configurations for the short version.
+ testCases = []testCase{
+ {numRows: 1, numColumnFamilies: 1, numVersions: 1, valueSize: 8, wholeRows: false},
+ {numRows: 100, numColumnFamilies: 10, numVersions: 1, valueSize: 8, wholeRows: true},
+ {numRows: 1000, numColumnFamilies: 3, numVersions: 1, valueSize: 64, wholeRows: true},
+ }
+ }
+
+ for _, tc := range testCases {
+ name := fmt.Sprintf(
+		"rows=%d/columnFamilies=%d/versions=%d/valueSize=%d/wholeRows=%t",
+ tc.numRows, tc.numColumnFamilies, tc.numVersions, tc.valueSize, tc.wholeRows,
+ )
+ b.Run(name, func(b *testing.B) {
+ ctx := context.Background()
+ runMVCCScan(ctx, b, benchScanOptions{
+ mvccBenchData: mvccBenchData{
+ numColumnFamilies: tc.numColumnFamilies,
+ numVersions: tc.numVersions,
+ valueBytes: tc.valueSize,
+ },
+ numRows: tc.numRows,
+ reverse: false,
+ wholeRows: tc.wholeRows,
+ })
+ })
+ }
+}
+
+func BenchmarkMVCCReverseScan_Pebble(b *testing.B) {
+ defer log.Scope(b).Close(b)
+
+ type testCase struct {
+ numRows int
+ numVersions int
+ valueSize int
+ numRangeKeys int
+ }
+ var testCases []testCase
+ for _, numRows := range []int{1, 10, 100, 1000, 10000, 50000} {
+ for _, numVersions := range []int{1, 2, 10, 100, 1000} {
+ for _, valueSize := range []int{8, 64, 512} {
+ for _, numRangeKeys := range []int{0, 1, 100} {
+ testCases = append(testCases, testCase{
+ numRows: numRows,
+ numVersions: numVersions,
+ valueSize: valueSize,
+ numRangeKeys: numRangeKeys,
+ })
+ }
+ }
+ }
+ }
+
+ if testing.Short() {
+ // Choose a few configurations for the short version.
+ testCases = []testCase{
+ {numRows: 1, numVersions: 1, valueSize: 8, numRangeKeys: 0},
+ {numRows: 100, numVersions: 1, valueSize: 8, numRangeKeys: 1},
+ {numRows: 1000, numVersions: 2, valueSize: 64, numRangeKeys: 100},
+ }
+ }
+
+ for _, tc := range testCases {
+ name := fmt.Sprintf(
+ "rows=%d/versions=%d/valueSize=%d/numRangeKeys=%d",
+ tc.numRows, tc.numVersions, tc.valueSize, tc.numRangeKeys,
+ )
+ b.Run(name, func(b *testing.B) {
+ ctx := context.Background()
+ runMVCCScan(ctx, b, benchScanOptions{
+ mvccBenchData: mvccBenchData{
+ numVersions: tc.numVersions,
+ valueBytes: tc.valueSize,
+ numRangeKeys: tc.numRangeKeys,
+ },
+ numRows: tc.numRows,
+ reverse: true,
+ })
+ })
+ }
+}
+
+func BenchmarkMVCCScanTransactionalData_Pebble(b *testing.B) {
+ defer log.Scope(b).Close(b)
+
+ ctx := context.Background()
+ runMVCCScan(ctx, b, benchScanOptions{
+ numRows: 10000,
+ mvccBenchData: mvccBenchData{
+ numVersions: 2,
+ valueBytes: 8,
+ transactional: true,
+ },
+ })
+}
+
+func BenchmarkMVCCGet_Pebble(b *testing.B) {
+ defer log.Scope(b).Close(b)
+
+ type testCase struct {
+ batch bool
+ numVersions int
+ valueSize int
+ numRangeKeys int
+ }
+ var testCases []testCase
+ for _, batch := range []bool{false, true} {
+ for _, numVersions := range []int{1, 10, 100} {
+ for _, valueSize := range []int{8} {
+ for _, numRangeKeys := range []int{0, 1, 100} {
+ testCases = append(testCases, testCase{
+ batch: batch,
+ numVersions: numVersions,
+ valueSize: valueSize,
+ numRangeKeys: numRangeKeys,
+ })
+ }
+ }
+ }
+ }
+
+ if testing.Short() {
+ // Choose a few configurations for the short version.
+ testCases = []testCase{
+ {batch: false, numVersions: 1, valueSize: 8, numRangeKeys: 0},
+ {batch: true, numVersions: 10, valueSize: 8, numRangeKeys: 0},
+ {batch: true, numVersions: 10, valueSize: 8, numRangeKeys: 10},
+ }
+ }
+
+ for _, tc := range testCases {
+ name := fmt.Sprintf(
+ "batch=%t/versions=%d/valueSize=%d/numRangeKeys=%d",
+ tc.batch, tc.numVersions, tc.valueSize, tc.numRangeKeys,
+ )
+ b.Run(name, func(b *testing.B) {
+ ctx := context.Background()
+ runMVCCGet(ctx, b, mvccBenchData{
+ numVersions: tc.numVersions,
+ valueBytes: tc.valueSize,
+ numRangeKeys: tc.numRangeKeys,
+ }, tc.batch)
+ })
+ }
+}
+
+func BenchmarkMVCCComputeStats_Pebble(b *testing.B) {
+ defer log.Scope(b).Close(b)
+
+ type testCase struct {
+ valueSize int
+ numRangeKeys int
+ }
+ var testCases []testCase
+ for _, valueSize := range []int{8, 32, 256} {
+ for _, numRangeKeys := range []int{0, 1, 100} {
+ testCases = append(testCases, testCase{
+ valueSize: valueSize,
+ numRangeKeys: numRangeKeys,
+ })
+ }
+ }
+
+ if testing.Short() {
+ // Choose a configuration for the short version.
+ testCases = []testCase{
+ {valueSize: 8, numRangeKeys: 1},
+ }
+ }
+
+ for _, tc := range testCases {
+ name := fmt.Sprintf(
+ "valueSize=%d/numRangeKeys=%d",
+ tc.valueSize, tc.numRangeKeys,
+ )
+
+ b.Run(name, func(b *testing.B) {
+ ctx := context.Background()
+ runMVCCComputeStats(ctx, b, tc.valueSize, tc.numRangeKeys)
+ })
+ }
+}
+
+func BenchmarkMVCCFindSplitKey_Pebble(b *testing.B) {
+ defer log.Scope(b).Close(b)
+ for _, valueSize := range []int{32} {
+ b.Run(fmt.Sprintf("valueSize=%d", valueSize), func(b *testing.B) {
+ ctx := context.Background()
+ runMVCCFindSplitKey(ctx, b, valueSize)
+ })
+ }
+}
+
+func BenchmarkMVCCPut_Pebble(b *testing.B) {
+ defer log.Scope(b).Close(b)
+
+ type testCase struct {
+ valueSize int
+ versions int
+ }
+ var testCases []testCase
+
+ for _, valueSize := range []int{10, 100, 1000, 10000} {
+ for _, versions := range []int{1, 10} {
+ testCases = append(testCases, testCase{
+ valueSize: valueSize,
+ versions: versions,
+ })
+ }
+ }
+
+ if testing.Short() {
+ // Choose a few configurations for the short version.
+ testCases = []testCase{
+ {valueSize: 10, versions: 1},
+ {valueSize: 1000, versions: 10},
+ }
+ }
+
+ for _, tc := range testCases {
+ // We use "batch=false" so that we can compare with corresponding benchmarks in older branches.
+ name := fmt.Sprintf("batch=false/valueSize=%d/versions=%d", tc.valueSize, tc.versions)
+ b.Run(name, func(b *testing.B) {
+ ctx := context.Background()
+ runMVCCPut(ctx, b, setupMVCCInMemPebble, tc.valueSize, tc.versions)
+ })
+ }
+}
+
+func BenchmarkMVCCBlindPut_Pebble(b *testing.B) {
+ defer log.Scope(b).Close(b)
+
+ valueSizes := []int{10, 100, 1000, 10000}
+ if testing.Short() {
+ valueSizes = []int{10, 10000}
+ }
+
+ for _, valueSize := range valueSizes {
+ b.Run(fmt.Sprintf("valueSize=%d", valueSize), func(b *testing.B) {
+ ctx := context.Background()
+ runMVCCBlindPut(ctx, b, setupMVCCInMemPebble, valueSize)
+ })
+ }
+}
+
+func BenchmarkMVCCConditionalPut_Pebble(b *testing.B) {
+ defer log.Scope(b).Close(b)
+
+ valueSizes := []int{10, 100, 1000, 10000}
+ if testing.Short() {
+ valueSizes = []int{10, 10000}
+ }
+
+ for _, createFirst := range []bool{false, true} {
+ prefix := "Create"
+ if createFirst {
+ prefix = "Replace"
+ }
+ b.Run(prefix, func(b *testing.B) {
+ for _, valueSize := range valueSizes {
+ b.Run(fmt.Sprintf("valueSize=%d", valueSize), func(b *testing.B) {
+ ctx := context.Background()
+ runMVCCConditionalPut(ctx, b, setupMVCCInMemPebble, valueSize, createFirst)
+ })
+ }
+ })
+ }
+}
+
+func BenchmarkMVCCBlindConditionalPut_Pebble(b *testing.B) {
+ defer log.Scope(b).Close(b)
+
+ valueSizes := []int{10, 100, 1000, 10000}
+ if testing.Short() {
+ valueSizes = []int{10, 10000}
+ }
+
+ for _, valueSize := range valueSizes {
+ b.Run(fmt.Sprintf("valueSize=%d", valueSize), func(b *testing.B) {
+ ctx := context.Background()
+ runMVCCBlindConditionalPut(ctx, b, setupMVCCInMemPebble, valueSize)
+ })
+ }
+}
+
+func BenchmarkMVCCInitPut_Pebble(b *testing.B) {
+ defer log.Scope(b).Close(b)
+
+ valueSizes := []int{10, 100, 1000, 10000}
+ if testing.Short() {
+ valueSizes = []int{10, 10000}
+ }
+
+ for _, valueSize := range valueSizes {
+ b.Run(fmt.Sprintf("valueSize=%d", valueSize), func(b *testing.B) {
+ ctx := context.Background()
+ runMVCCInitPut(ctx, b, setupMVCCInMemPebble, valueSize)
+ })
+ }
+}
+
+func BenchmarkMVCCBlindInitPut_Pebble(b *testing.B) {
+ defer log.Scope(b).Close(b)
+
+ valueSizes := []int{10, 100, 1000, 10000}
+ if testing.Short() {
+ valueSizes = []int{10, 10000}
+ }
+
+ for _, valueSize := range valueSizes {
+ b.Run(fmt.Sprintf("valueSize=%d", valueSize), func(b *testing.B) {
+ ctx := context.Background()
+ runMVCCBlindInitPut(ctx, b, setupMVCCInMemPebble, valueSize)
+ })
+ }
+}
+
+func BenchmarkMVCCPutDelete_Pebble(b *testing.B) {
+ defer log.Scope(b).Close(b)
+ ctx := context.Background()
+ db := setupMVCCInMemPebble(b, "put_delete")
+ defer db.Close()
+
+ r := rand.New(rand.NewSource(timeutil.Now().UnixNano()))
+ value := roachpb.MakeValueFromBytes(randutil.RandBytes(r, 10))
+ var blockNum int64
+
+ for i := 0; i < b.N; i++ {
+ blockID := r.Int63()
+ blockNum++
+ key := encoding.EncodeVarintAscending(nil, blockID)
+ key = encoding.EncodeVarintAscending(key, blockNum)
+
+ if _, err := MVCCPut(ctx, db, key, hlc.Timestamp{}, value, MVCCWriteOptions{}); err != nil {
+ b.Fatal(err)
+ }
+ if _, _, err := MVCCDelete(ctx, db, key, hlc.Timestamp{}, MVCCWriteOptions{}); err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func BenchmarkMVCCBatchPut_Pebble(b *testing.B) {
+ defer log.Scope(b).Close(b)
+
+ batchSizes := []int{10, 100, 1000, 10000}
+ if testing.Short() {
+ batchSizes = []int{10, 10000}
+ }
+
+ for _, valueSize := range []int{10} {
+ b.Run(fmt.Sprintf("valueSize=%d", valueSize), func(b *testing.B) {
+ for _, batchSize := range batchSizes {
+ b.Run(fmt.Sprintf("batchSize=%d", batchSize), func(b *testing.B) {
+ ctx := context.Background()
+ runMVCCBatchPut(ctx, b, setupMVCCInMemPebble, valueSize, batchSize)
+ })
+ }
+ })
+ }
+}
+
+func BenchmarkMVCCBatchTimeSeries_Pebble(b *testing.B) {
+ defer log.Scope(b).Close(b)
+ ctx := context.Background()
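+	// The batch size of 282 is carried over from the original benchmark;
+	// it presumably reflects a representative time-series write batch.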
+ for _, batchSize := range []int{282} {
+ b.Run(fmt.Sprintf("batchSize=%d", batchSize), func(b *testing.B) {
+ runMVCCBatchTimeSeries(ctx, b, setupMVCCInMemPebble, batchSize)
+ })
+ }
+}
+
+// BenchmarkMVCCGetMergedTimeSeries_Pebble measures the performance of reading
+// merged time series data using `MVCCGet()`. Uses an in-memory engine.
+func BenchmarkMVCCGetMergedTimeSeries_Pebble(b *testing.B) {
+ defer log.Scope(b).Close(b)
+
+ type testCase struct {
+ numKeys int
+ mergesPerKey int
+ }
+ var testCases []testCase
+ for _, numKeys := range []int{1, 16, 256} {
+ for _, mergesPerKey := range []int{1, 16, 256} {
+ testCases = append(testCases, testCase{
+ numKeys: numKeys,
+ mergesPerKey: mergesPerKey,
+ })
+ }
+ }
+
+ if testing.Short() {
+ // Choose a configuration for the short version.
+ testCases = []testCase{
+ {numKeys: 16, mergesPerKey: 16},
+ }
+ }
+
+ for _, tc := range testCases {
+ name := fmt.Sprintf("numKeys=%d/mergesPerKey=%d", tc.numKeys, tc.mergesPerKey)
+ b.Run(name, func(b *testing.B) {
+ ctx := context.Background()
+ runMVCCGetMergedValue(ctx, b, setupMVCCInMemPebble, tc.numKeys, tc.mergesPerKey)
+ })
+ }
+}
+
+// DeleteRange benchmarks below (using on-disk data).
+//
+// TODO(peter): Benchmark{MVCCDeleteRange,ClearRange,ClearIterRange}_Pebble
+// give nonsensical results (DeleteRange is absurdly slow and ClearRange
+// reports a processing speed of 481 million MB/s!). We need to take a look at
+// what these benchmarks are trying to measure, and fix them.
+
+func BenchmarkMVCCDeleteRange_Pebble(b *testing.B) {
+ // TODO(radu): run one configuration under Short once the above TODO is
+ // resolved.
+ skip.UnderShort(b)
+ defer log.Scope(b).Close(b)
+ ctx := context.Background()
+ for _, valueSize := range []int{8, 32, 256} {
+ b.Run(fmt.Sprintf("valueSize=%d", valueSize), func(b *testing.B) {
+ runMVCCDeleteRange(ctx, b, valueSize)
+ })
+ }
+}
+
+func BenchmarkMVCCDeleteRangeUsingTombstone_Pebble(b *testing.B) {
+ // TODO(radu): run one configuration under Short once the above TODO is
+ // resolved.
+ skip.UnderShort(b)
+ defer log.Scope(b).Close(b)
+ ctx := context.Background()
+ for _, numKeys := range []int{1000, 10000, 100000} {
+ b.Run(fmt.Sprintf("numKeys=%d", numKeys), func(b *testing.B) {
+ for _, valueSize := range []int{64} {
+ b.Run(fmt.Sprintf("valueSize=%d", valueSize), func(b *testing.B) {
+ for _, entireRange := range []bool{false, true} {
+ b.Run(fmt.Sprintf("entireRange=%t", entireRange), func(b *testing.B) {
+ runMVCCDeleteRangeUsingTombstone(ctx, b, numKeys, valueSize, entireRange)
+ })
+ }
+ })
+ }
+ })
+ }
+}
+
+// BenchmarkMVCCDeleteRangeWithPredicate_Pebble benchmarks predicate-based
+// delete range across several configurations. A lower streak bound simulates sequential
+// imports with more interspersed keys, leading to fewer range tombstones and
+// more point tombstones.
+func BenchmarkMVCCDeleteRangeWithPredicate_Pebble(b *testing.B) {
+ // TODO(radu): run one configuration under Short once the above TODO is
+ // resolved.
+ skip.UnderShort(b)
+ defer log.Scope(b).Close(b)
+ ctx := context.Background()
+ for _, streakBound := range []int{10, 100, 200, 500} {
+ b.Run(fmt.Sprintf("streakBound=%d", streakBound), func(b *testing.B) {
+ for _, rangeKeyThreshold := range []int64{64} {
+ b.Run(fmt.Sprintf("rangeKeyThreshold=%d", rangeKeyThreshold), func(b *testing.B) {
+ config := mvccImportedData{
+ streakBound: streakBound,
+ keyCount: 2000,
+ valueBytes: 64,
+ layers: 2,
+ }
+ runMVCCDeleteRangeWithPredicate(ctx, b, config, 0, rangeKeyThreshold)
+ })
+ }
+ })
+ }
+}
+
+func BenchmarkClearMVCCVersions_Pebble(b *testing.B) {
+ // TODO(radu): run one configuration under Short once the above TODO is
+ // resolved.
+ skip.UnderShort(b)
+ defer log.Scope(b).Close(b)
+ ctx := context.Background()
+ runClearRange(ctx, b, func(eng Engine, batch Batch, start, end MVCCKey) error {
+ return batch.ClearMVCCVersions(start, end)
+ })
+}
+
+func BenchmarkClearMVCCIteratorRange_Pebble(b *testing.B) {
+ ctx := context.Background()
+ defer log.Scope(b).Close(b)
+ runClearRange(ctx, b, func(eng Engine, batch Batch, start, end MVCCKey) error {
+ return batch.ClearMVCCIteratorRange(start.Key, end.Key, true, true)
+ })
+}
+
+func BenchmarkBatchApplyBatchRepr_Pebble(b *testing.B) {
+ defer log.Scope(b).Close(b)
+
+ type testCase struct {
+ indexed bool
+ sequential bool
+ valueSize int
+ batchSize int
+ }
+ var testCases []testCase
+
+ for _, indexed := range []bool{false, true} {
+ for _, sequential := range []bool{false, true} {
+ for _, valueSize := range []int{10} {
+ for _, batchSize := range []int{10000} {
+ testCases = append(testCases, testCase{
+ indexed: indexed,
+ sequential: sequential,
+ valueSize: valueSize,
+ batchSize: batchSize,
+ })
+ }
+ }
+ }
+ }
+
+ if testing.Short() {
+ // Choose a configuration for the short version.
+ testCases = []testCase{
+ {indexed: true, sequential: false, valueSize: 10, batchSize: 8},
+ }
+ }
+
+ for _, tc := range testCases {
+ name := fmt.Sprintf(
+ "indexed=%t/seq=%t/valueSize=%d/batchSize=%d",
+ tc.indexed, tc.sequential, tc.valueSize, tc.batchSize,
+ )
+
+ b.Run(name, func(b *testing.B) {
+ ctx := context.Background()
+ runBatchApplyBatchRepr(ctx, b, setupMVCCInMemPebble,
+ tc.indexed, tc.sequential, tc.valueSize, tc.batchSize)
+ })
+ }
+}
+
+type acquireLockTestCase struct {
+ batch bool
+ heldOtherTxn bool
+ heldSameTxn bool
+ strength lock.Strength
+}
+
+func (tc acquireLockTestCase) name() string {
+ return fmt.Sprintf(
+ "batch=%t/heldOtherTxn=%t/heldSameTxn=%t/strength=%s",
+ tc.batch, tc.heldOtherTxn, tc.heldSameTxn, tc.strength,
+ )
+}
+
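+// acquireLockTestCases enumerates the cross product of lock-acquisition
+// configurations, skipping the impossible case where the lock is already
+// held by both the same and another transaction.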
+func acquireLockTestCases() []acquireLockTestCase {
+ var res []acquireLockTestCase
+ for _, batch := range []bool{false, true} {
+ for _, heldOtherTxn := range []bool{false, true} {
+ for _, heldSameTxn := range []bool{false, true} {
+ if heldOtherTxn && heldSameTxn {
+ continue // not possible
+ }
+ for _, strength := range []lock.Strength{lock.Shared, lock.Exclusive} {
+ res = append(res, acquireLockTestCase{
+ batch: batch,
+ heldOtherTxn: heldOtherTxn,
+ heldSameTxn: heldSameTxn,
+ strength: strength,
+ })
+ }
+ }
+ }
+ }
+ return res
+}
+
+func BenchmarkMVCCCheckForAcquireLock_Pebble(b *testing.B) {
+ defer log.Scope(b).Close(b)
+
+ for _, tc := range acquireLockTestCases() {
+ b.Run(tc.name(), func(b *testing.B) {
+ ctx := context.Background()
+ runMVCCCheckForAcquireLock(ctx, b, setupMVCCInMemPebble, tc.batch, tc.heldOtherTxn, tc.heldSameTxn, tc.strength)
+ })
+ }
+}
+
+func BenchmarkMVCCAcquireLock_Pebble(b *testing.B) {
+ defer log.Scope(b).Close(b)
+
+ for _, tc := range acquireLockTestCases() {
+ b.Run(tc.name(), func(b *testing.B) {
+ ctx := context.Background()
+ runMVCCAcquireLock(ctx, b, setupMVCCInMemPebble, tc.batch, tc.heldOtherTxn, tc.heldSameTxn, tc.strength)
+ })
+ }
+}
+
+func BenchmarkBatchBuilderPut(b *testing.B) {
+ defer log.Scope(b).Close(b)
+ value := make([]byte, 10)
+ for i := range value {
+ value[i] = byte(i)
+ }
+ keyBuf := append(make([]byte, 0, 64), []byte("key-")...)
+
+ b.ResetTimer()
+
+ const batchSize = 1000
+ var batch pebble.Batch
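+	// Build raw Pebble batches of batchSize entries each, resetting the batch
+	// between rounds; this isolates Batch.Set encoding cost from commit cost.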
+ for i := 0; i < b.N; i += batchSize {
+ end := i + batchSize
+ if end > b.N {
+ end = b.N
+ }
+
+ for j := i; j < end; j++ {
+ key := roachpb.Key(encoding.EncodeUvarintAscending(keyBuf[:4], uint64(j)))
+			ts := hlc.Timestamp{WallTime: int64(j + 1)} // j+1 to avoid a zero timestamp
+ require.NoError(b, batch.Set(EncodeMVCCKey(MVCCKey{key, ts}), value, nil /* WriteOptions */))
+ }
+ batch.Reset()
+ }
+
+ b.StopTimer()
+}
+
+func BenchmarkCheckSSTConflicts(b *testing.B) {
+ defer log.Scope(b).Close(b)
+
+ type testCase struct {
+ numKeys int
+ numSSTKeys int
+ overlap bool
+ usePrefixSeek bool
+ }
+ var testCases []testCase
+
+ for _, numKeys := range []int{1000, 10000, 100000} {
+ for _, numSSTKeys := range []int{10, 100, 1000, 10000, 100000} {
+ for _, overlap := range []bool{false, true} {
+ for _, usePrefixSeek := range []bool{false, true} {
+ testCases = append(testCases, testCase{
+ numKeys: numKeys,
+ numSSTKeys: numSSTKeys,
+ overlap: overlap,
+ usePrefixSeek: usePrefixSeek,
+ })
+ }
+ }
+ }
+ }
+
+ if testing.Short() {
+ // Choose a few configurations for the short version.
+ testCases = []testCase{
+ {numKeys: 10000, numSSTKeys: 100, overlap: false, usePrefixSeek: false},
+ {numKeys: 10000, numSSTKeys: 1000, overlap: true, usePrefixSeek: true},
+ }
+ }
+
+ for _, tc := range testCases {
+ name := fmt.Sprintf(
+ "keys=%d/sstKeys=%d/overlap=%t/usePrefixSeek=%v",
+ tc.numKeys, tc.numSSTKeys, tc.overlap, tc.usePrefixSeek,
+ )
+ b.Run(name, func(b *testing.B) {
+ runCheckSSTConflicts(b, tc.numKeys, 1 /* numVersions */, tc.numSSTKeys, tc.overlap, tc.usePrefixSeek)
+ })
+ }
+}
+
+func BenchmarkSSTIterator(b *testing.B) {
+ defer log.Scope(b).Close(b)
+
+ for _, numKeys := range []int{1, 100, 10000} {
+ b.Run(fmt.Sprintf("keys=%d", numKeys), func(b *testing.B) {
+ for _, verify := range []bool{false, true} {
+ b.Run(fmt.Sprintf("verify=%t", verify), func(b *testing.B) {
+ runSSTIterator(b, numKeys, verify)
+ })
+ }
+ })
+ }
+}
diff --git a/pkg/storage/bench_test.go b/pkg/storage/bench_test.go
index d2a24cbabe95..8a2044180bd4 100644
--- a/pkg/storage/bench_test.go
+++ b/pkg/storage/bench_test.go
@@ -17,7 +17,6 @@ import (
"testing"
"time"
- "github.com/cockroachdb/cockroach/pkg/clusterversion"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/kv/kvpb"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/concurrency/lock"
@@ -46,42 +45,10 @@ import (
"github.com/stretchr/testify/require"
)
-func BenchmarkMVCCComputeStats(b *testing.B) {
- defer log.Scope(b).Close(b)
-
- type testCase struct {
- valueSize int
- numRangeKeys int
- }
- var testCases []testCase
- for _, valueSize := range []int{8, 32, 256} {
- for _, numRangeKeys := range []int{0, 1, 100} {
- testCases = append(testCases, testCase{
- valueSize: valueSize,
- numRangeKeys: numRangeKeys,
- })
- }
- }
-
- if testing.Short() {
- // Choose a configuration for the short version.
- testCases = []testCase{
- {valueSize: 8, numRangeKeys: 1},
- }
- }
-
- for _, tc := range testCases {
- name := fmt.Sprintf(
- "valueSize=%d/numRangeKeys=%d",
- tc.valueSize, tc.numRangeKeys,
- )
-
- b.Run(name, func(b *testing.B) {
- ctx := context.Background()
- runMVCCComputeStats(ctx, b, tc.valueSize, tc.numRangeKeys)
- })
- }
-}
+// Note: most benchmarks in this package have an engine-specific Benchmark
+// function (see bench_rocksdb_test.go and bench_pebble_test.go). The newer
+// benchmarks with a unified implementation live at the top of this file,
+// followed by the shared implementation logic for the other tests.
func BenchmarkMVCCGarbageCollect(b *testing.B) {
skip.UnderShort(b)
@@ -163,56 +130,6 @@ func BenchmarkMVCCGarbageCollect(b *testing.B) {
}
}
-func BenchmarkMVCCGet(b *testing.B) {
- defer log.Scope(b).Close(b)
-
- type testCase struct {
- batch bool
- numVersions int
- valueSize int
- numRangeKeys int
- }
- var testCases []testCase
- for _, batch := range []bool{false, true} {
- for _, numVersions := range []int{1, 10, 100} {
- for _, valueSize := range []int{8} {
- for _, numRangeKeys := range []int{0, 1, 100} {
- testCases = append(testCases, testCase{
- batch: batch,
- numVersions: numVersions,
- valueSize: valueSize,
- numRangeKeys: numRangeKeys,
- })
- }
- }
- }
- }
-
- if testing.Short() {
- // Choose a few configurations for the short version.
- testCases = []testCase{
- {batch: false, numVersions: 1, valueSize: 8, numRangeKeys: 0},
- {batch: true, numVersions: 10, valueSize: 8, numRangeKeys: 0},
- {batch: true, numVersions: 10, valueSize: 8, numRangeKeys: 10},
- }
- }
-
- for _, tc := range testCases {
- name := fmt.Sprintf(
- "batch=%t/versions=%d/valueSize=%d/numRangeKeys=%d",
- tc.batch, tc.numVersions, tc.valueSize, tc.numRangeKeys,
- )
- b.Run(name, func(b *testing.B) {
- ctx := context.Background()
- runMVCCGet(ctx, b, mvccBenchData{
- numVersions: tc.numVersions,
- valueBytes: tc.valueSize,
- numRangeKeys: tc.numRangeKeys,
- }, tc.batch)
- })
- }
-}
-
func BenchmarkMVCCExportToSST(b *testing.B) {
skip.UnderShort(b)
defer log.Scope(b).Close(b)
@@ -318,16 +235,6 @@ func BenchmarkMVCCExportToSST(b *testing.B) {
}
}
-func BenchmarkMVCCFindSplitKey(b *testing.B) {
- defer log.Scope(b).Close(b)
- for _, valueSize := range []int{32} {
- b.Run(fmt.Sprintf("valueSize=%d", valueSize), func(b *testing.B) {
- ctx := context.Background()
- runMVCCFindSplitKey(ctx, b, valueSize)
- })
- }
-}
-
const numIntentKeys = 1000
// setupKeysWithIntent writes keys using transactions to eng. The number of
@@ -594,252 +501,6 @@ func BenchmarkScanAllIntentsResolved(b *testing.B) {
}
}
-func BenchmarkMVCCScan(b *testing.B) {
- defer log.Scope(b).Close(b)
-
- type testCase struct {
- numRows int
- numVersions int
- valueSize int
- numRangeKeys int
- includeHeader bool
- }
- var testCases []testCase
- for _, numRows := range []int{1, 10, 100, 1000, 10000, 50000} {
- for _, numVersions := range []int{1, 2, 10, 100, 1000} {
- for _, valueSize := range []int{8, 64, 512} {
- for _, numRangeKeys := range []int{0, 1, 100} {
- testCases = append(testCases, testCase{
- numRows: numRows,
- numVersions: numVersions,
- valueSize: valueSize,
- numRangeKeys: numRangeKeys,
- })
- }
- }
- }
- }
-
- if testing.Short() {
- // Choose a few configurations for the short version.
- testCases = []testCase{
- {numRows: 1, numVersions: 1, valueSize: 8, numRangeKeys: 0},
- {numRows: 100, numVersions: 2, valueSize: 64, numRangeKeys: 1},
- {numRows: 1000, numVersions: 10, valueSize: 64, numRangeKeys: 100},
- }
- }
-
- testCases = append(testCases, testCase{
- numRows: 1000,
- numVersions: 2,
- valueSize: 64,
- numRangeKeys: 0,
- includeHeader: true,
- })
-
- for _, tc := range testCases {
- name := fmt.Sprintf(
- "rows=%d/versions=%d/valueSize=%d/numRangeKeys=%d/headers=%v",
- tc.numRows, tc.numVersions, tc.valueSize, tc.numRangeKeys, tc.includeHeader,
- )
- b.Run(name, func(b *testing.B) {
- ctx := context.Background()
- runMVCCScan(ctx, b, benchScanOptions{
- mvccBenchData: mvccBenchData{
- numVersions: tc.numVersions,
- valueBytes: tc.valueSize,
- numRangeKeys: tc.numRangeKeys,
- includeHeader: tc.includeHeader,
- },
- numRows: tc.numRows,
- reverse: false,
- })
- })
- }
-}
-
-func BenchmarkMVCCScanGarbage(b *testing.B) {
- defer log.Scope(b).Close(b)
-
- type testCase struct {
- numRows int
- numVersions int
- numRangeKeys int
- tombstones bool
- }
- var testCases []testCase
- for _, numRows := range []int{1, 10, 100, 1000, 10000, 50000} {
- for _, numVersions := range []int{1, 2, 10, 100, 1000} {
- for _, numRangeKeys := range []int{0, 1, 100} {
- for _, tombstones := range []bool{false, true} {
- testCases = append(testCases, testCase{
- numRows: numRows,
- numVersions: numVersions,
- numRangeKeys: numRangeKeys,
- tombstones: tombstones,
- })
- }
- }
- }
- }
-
- if testing.Short() {
- // Choose a few configurations for the short version.
- testCases = []testCase{
- {numRows: 1, numVersions: 1, numRangeKeys: 0, tombstones: false},
- {numRows: 10, numVersions: 2, numRangeKeys: 1, tombstones: true},
- {numRows: 1000, numVersions: 10, numRangeKeys: 100, tombstones: true},
- }
- }
-
- for _, tc := range testCases {
- name := fmt.Sprintf(
- "rows=%d/versions=%d/numRangeKeys=%d/tombstones=%t",
- tc.numRows, tc.numVersions, tc.numRangeKeys, tc.tombstones,
- )
- b.Run(name, func(b *testing.B) {
- ctx := context.Background()
- runMVCCScan(ctx, b, benchScanOptions{
- mvccBenchData: mvccBenchData{
- numVersions: tc.numVersions,
- numRangeKeys: tc.numRangeKeys,
- garbage: true,
- },
- numRows: tc.numRows,
- tombstones: tc.tombstones,
- reverse: false,
- })
- })
- }
-}
-
-func BenchmarkMVCCScanSQLRows(b *testing.B) {
- defer log.Scope(b).Close(b)
-
- type testCase struct {
- numRows int
- numColumnFamilies int
- numVersions int
- valueSize int
- wholeRows bool
- }
- var testCases []testCase
- for _, numRows := range []int{1, 10, 100, 1000, 10000} {
- for _, numColumnFamilies := range []int{1, 3, 10} {
- for _, numVersions := range []int{1} {
- for _, valueSize := range []int{8, 64, 512} {
- for _, wholeRows := range []bool{false, true} {
- testCases = append(testCases, testCase{
- numRows: numRows,
- numColumnFamilies: numColumnFamilies,
- numVersions: numVersions,
- valueSize: valueSize,
- wholeRows: wholeRows,
- })
- }
- }
- }
- }
- }
-
- if testing.Short() {
- // Choose a few configurations for the short version.
- testCases = []testCase{
- {numRows: 1, numColumnFamilies: 1, numVersions: 1, valueSize: 8, wholeRows: false},
- {numRows: 100, numColumnFamilies: 10, numVersions: 1, valueSize: 8, wholeRows: true},
- {numRows: 1000, numColumnFamilies: 3, numVersions: 1, valueSize: 64, wholeRows: true},
- }
- }
-
- for _, tc := range testCases {
- name := fmt.Sprintf(
- "rows=%d/columnFamillies=%d/versions=%d/valueSize=%d/wholeRows=%t",
- tc.numRows, tc.numColumnFamilies, tc.numVersions, tc.valueSize, tc.wholeRows,
- )
- b.Run(name, func(b *testing.B) {
- ctx := context.Background()
- runMVCCScan(ctx, b, benchScanOptions{
- mvccBenchData: mvccBenchData{
- numColumnFamilies: tc.numColumnFamilies,
- numVersions: tc.numVersions,
- valueBytes: tc.valueSize,
- },
- numRows: tc.numRows,
- reverse: false,
- wholeRows: tc.wholeRows,
- })
- })
- }
-}
-
-func BenchmarkMVCCReverseScan(b *testing.B) {
- defer log.Scope(b).Close(b)
-
- type testCase struct {
- numRows int
- numVersions int
- valueSize int
- numRangeKeys int
- }
- var testCases []testCase
- for _, numRows := range []int{1, 10, 100, 1000, 10000, 50000} {
- for _, numVersions := range []int{1, 2, 10, 100, 1000} {
- for _, valueSize := range []int{8, 64, 512} {
- for _, numRangeKeys := range []int{0, 1, 100} {
- testCases = append(testCases, testCase{
- numRows: numRows,
- numVersions: numVersions,
- valueSize: valueSize,
- numRangeKeys: numRangeKeys,
- })
- }
- }
- }
- }
-
- if testing.Short() {
- // Choose a few configurations for the short version.
- testCases = []testCase{
- {numRows: 1, numVersions: 1, valueSize: 8, numRangeKeys: 0},
- {numRows: 100, numVersions: 1, valueSize: 8, numRangeKeys: 1},
- {numRows: 1000, numVersions: 2, valueSize: 64, numRangeKeys: 100},
- }
- }
-
- for _, tc := range testCases {
- name := fmt.Sprintf(
- "rows=%d/versions=%d/valueSize=%d/numRangeKeys=%d",
- tc.numRows, tc.numVersions, tc.valueSize, tc.numRangeKeys,
- )
- b.Run(name, func(b *testing.B) {
- ctx := context.Background()
- runMVCCScan(ctx, b, benchScanOptions{
- mvccBenchData: mvccBenchData{
- numVersions: tc.numVersions,
- valueBytes: tc.valueSize,
- numRangeKeys: tc.numRangeKeys,
- },
- numRows: tc.numRows,
- reverse: true,
- })
- })
- }
-}
-
-func BenchmarkMVCCScanTransactionalData(b *testing.B) {
- defer log.Scope(b).Close(b)
-
- ctx := context.Background()
- runMVCCScan(ctx, b, benchScanOptions{
- numRows: 10000,
- mvccBenchData: mvccBenchData{
- numVersions: 2,
- valueBytes: 8,
- transactional: true,
- },
- })
-}
-
// BenchmarkScanOneAllIntentsResolved compares separated and interleaved
// intents, when reading the latest version for a range of keys, when all the
// intents have been resolved. Unlike the previous benchmark, each scan reads
@@ -1230,6 +891,32 @@ func runMVCCGet(ctx context.Context, b *testing.B, opts mvccBenchData, useBatch
b.StopTimer()
}
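+// runMVCCPut writes b.N distinct keys, each with `versions` versions at
+// increasing wallclock timestamps, against an engine created by emk.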
+func runMVCCPut(ctx context.Context, b *testing.B, emk engineMaker, valueSize, versions int) {
+ rng, _ := randutil.NewTestRand()
+ value := roachpb.MakeValueFromBytes(randutil.RandBytes(rng, valueSize))
+ keyBuf := append(make([]byte, 0, 64), []byte("key-")...)
+
+ eng := emk(b, fmt.Sprintf("put_%d", valueSize))
+ defer eng.Close()
+
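+	// Write directly to the engine rather than through a batch, matching the
+	// "batch=false" label in the benchmark names.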
+ rw := ReadWriter(eng)
+
+ b.SetBytes(int64(valueSize))
+ b.ResetTimer()
+
+ for i := 0; i < b.N; i++ {
+ for j := 0; j < versions; j++ {
+ key := roachpb.Key(encoding.EncodeUvarintAscending(keyBuf[:4], uint64(i)))
+ ts := hlc.Timestamp{WallTime: timeutil.Now().UnixNano()}
+ if _, err := MVCCPut(ctx, rw, key, ts, value, MVCCWriteOptions{}); err != nil {
+ b.Fatalf("failed put: %+v", err)
+ }
+ }
+ }
+
+ b.StopTimer()
+}
+
func runMVCCBlindPut(ctx context.Context, b *testing.B, emk engineMaker, valueSize int) {
rng, _ := randutil.NewTestRand()
value := roachpb.MakeValueFromBytes(randutil.RandBytes(rng, valueSize))
@@ -1244,13 +931,9 @@ func runMVCCBlindPut(ctx context.Context, b *testing.B, emk engineMaker, valueSi
for i := 0; i < b.N; i++ {
key := roachpb.Key(encoding.EncodeUvarintAscending(keyBuf[:4], uint64(i)))
ts := hlc.Timestamp{WallTime: timeutil.Now().UnixNano()}
- batch := eng.NewWriteBatch()
- if _, err := MVCCBlindPut(ctx, batch, key, ts, value, MVCCWriteOptions{}); err != nil {
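+		// Write directly to the engine; this drops the per-iteration batch
+		// allocation and commit, so the benchmark measures just the blind put.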
+ if _, err := MVCCBlindPut(ctx, eng, key, ts, value, MVCCWriteOptions{}); err != nil {
b.Fatalf("failed put: %+v", err)
}
- if err := batch.Commit(true); err != nil {
- b.Fatalf("failed commit: %v", err)
- }
}
b.StopTimer()
@@ -1272,13 +955,9 @@ func runMVCCConditionalPut(
for i := 0; i < b.N; i++ {
key := roachpb.Key(encoding.EncodeUvarintAscending(keyBuf[:4], uint64(i)))
ts := hlc.Timestamp{WallTime: timeutil.Now().UnixNano()}
- batch := eng.NewBatch()
- if _, err := MVCCPut(ctx, batch, key, ts, value, MVCCWriteOptions{}); err != nil {
+ if _, err := MVCCPut(ctx, eng, key, ts, value, MVCCWriteOptions{}); err != nil {
b.Fatalf("failed put: %+v", err)
}
- if err := batch.Commit(true); err != nil {
- b.Fatalf("failed commit: %v", err)
- }
}
expected = value.TagAndDataBytes()
}
@@ -1288,13 +967,9 @@ func runMVCCConditionalPut(
for i := 0; i < b.N; i++ {
key := roachpb.Key(encoding.EncodeUvarintAscending(keyBuf[:4], uint64(i)))
ts := hlc.Timestamp{WallTime: timeutil.Now().UnixNano()}
- batch := eng.NewBatch()
- if _, err := MVCCConditionalPut(ctx, batch, key, ts, value, expected, ConditionalPutWriteOptions{AllowIfDoesNotExist: CPutFailIfMissing}); err != nil {
+ if _, err := MVCCConditionalPut(ctx, eng, key, ts, value, expected, ConditionalPutWriteOptions{AllowIfDoesNotExist: CPutFailIfMissing}); err != nil {
b.Fatalf("failed put: %+v", err)
}
- if err := batch.Commit(true); err != nil {
- b.Fatalf("failed commit: %v", err)
- }
}
b.StopTimer()
@@ -1314,15 +989,11 @@ func runMVCCBlindConditionalPut(ctx context.Context, b *testing.B, emk engineMak
for i := 0; i < b.N; i++ {
key := roachpb.Key(encoding.EncodeUvarintAscending(keyBuf[:4], uint64(i)))
ts := hlc.Timestamp{WallTime: timeutil.Now().UnixNano()}
- batch := eng.NewWriteBatch()
if _, err := MVCCBlindConditionalPut(
- ctx, batch, key, ts, value, nil, ConditionalPutWriteOptions{AllowIfDoesNotExist: CPutFailIfMissing},
+ ctx, eng, key, ts, value, nil, ConditionalPutWriteOptions{AllowIfDoesNotExist: CPutFailIfMissing},
); err != nil {
b.Fatalf("failed put: %+v", err)
}
- if err := batch.Commit(true); err != nil {
- b.Fatalf("failed commit: %v", err)
- }
}
b.StopTimer()
@@ -1342,13 +1013,9 @@ func runMVCCInitPut(ctx context.Context, b *testing.B, emk engineMaker, valueSiz
for i := 0; i < b.N; i++ {
key := roachpb.Key(encoding.EncodeUvarintAscending(keyBuf[:4], uint64(i)))
ts := hlc.Timestamp{WallTime: timeutil.Now().UnixNano()}
- batch := eng.NewBatch()
- if _, err := MVCCInitPut(ctx, batch, key, ts, value, false, MVCCWriteOptions{}); err != nil {
+ if _, err := MVCCInitPut(ctx, eng, key, ts, value, false, MVCCWriteOptions{}); err != nil {
b.Fatalf("failed put: %+v", err)
}
- if err := batch.Commit(true); err != nil {
- b.Fatalf("failed commit: %v", err)
- }
}
b.StopTimer()
@@ -1368,13 +1035,9 @@ func runMVCCBlindInitPut(ctx context.Context, b *testing.B, emk engineMaker, val
for i := 0; i < b.N; i++ {
key := roachpb.Key(encoding.EncodeUvarintAscending(keyBuf[:4], uint64(i)))
ts := hlc.Timestamp{WallTime: timeutil.Now().UnixNano()}
- wb := eng.NewWriteBatch()
- if _, err := MVCCBlindInitPut(ctx, wb, key, ts, value, false, MVCCWriteOptions{}); err != nil {
+ if _, err := MVCCBlindInitPut(ctx, eng, key, ts, value, false, MVCCWriteOptions{}); err != nil {
b.Fatalf("failed put: %+v", err)
}
- if err := wb.Commit(true); err != nil {
- b.Fatalf("failed commit: %v", err)
- }
}
b.StopTimer()
@@ -1573,9 +1236,7 @@ func runMVCCDeleteRangeUsingTombstone(
defer eng.Close()
ms, err := ComputeStats(ctx, eng, keys.LocalMax, keys.MaxKey, 0)
- if err != nil {
- b.Fatal(err)
- }
+ require.NoError(b, err)
leftPeekBound = keys.LocalMax
rightPeekBound = keys.MaxKey
@@ -1657,12 +1318,8 @@ func runMVCCDeleteRangeWithPredicate(
0,
)
b.StopTimer()
- if err != nil {
- b.Fatal(err)
- }
- if resumeSpan != nil {
- b.Fatalf("unexpected resume span: %v", resumeSpan)
- }
+ require.NoError(b, err)
+ require.Nil(b, resumeSpan)
}()
}
}
@@ -2000,13 +1657,9 @@ func runMVCCAcquireLockCommon(
}
// Acquire a shared and an exclusive lock on the key.
err := MVCCAcquireLock(ctx, eng, txn, lock.Shared, key, nil, 0, 0)
- if err != nil {
- b.Fatal(err)
- }
+ require.NoError(b, err)
err = MVCCAcquireLock(ctx, eng, txn, lock.Exclusive, key, nil, 0, 0)
- if err != nil {
- b.Fatal(err)
- }
+ require.NoError(b, err)
}
}
@@ -2030,11 +1683,9 @@ func runMVCCAcquireLockCommon(
err = MVCCAcquireLock(ctx, rw, txn, strength, key, ms, 0, 0)
}
if heldOtherTxn {
- if err == nil {
- b.Fatalf("expected error but got %s", err)
- }
- } else if err != nil {
- b.Fatal(err)
+ require.Error(b, err)
+ } else {
+ require.NoError(b, err)
}
}
@@ -2092,17 +1743,10 @@ func runMVCCExportToSST(b *testing.B, opts mvccExportToSSTOpts) {
}
startKey := mkKey(start)
endKey := mkKey(end)
- err := MVCCDeleteRangeUsingTombstone(
- ctx, batch, nil, startKey, endKey, ts, hlc.ClockTimestamp{}, nil, nil, false, 0, 0, nil)
- if err != nil {
- b.Fatal(err)
- }
-
- }
- err := batch.Commit(false /* sync */)
- if err != nil {
- b.Fatal(err)
+ require.NoError(b, MVCCDeleteRangeUsingTombstone(
+ ctx, batch, nil, startKey, endKey, ts, hlc.ClockTimestamp{}, nil, nil, false, 0, 0, nil))
}
+ require.NoError(b, batch.Commit(false /* sync */))
}()
batch := engine.NewBatch()
@@ -2182,15 +1826,11 @@ func runMVCCExportToSST(b *testing.B, opts mvccExportToSSTOpts) {
b.StopTimer()
if i == 0 {
- if buf.Len() == 0 {
- b.Fatalf("empty SST")
- }
+ require.NotZero(b, buf.Len())
assertLen = buf.Len()
}
- if buf.Len() != assertLen {
- b.Fatalf("unexpected SST size: %d, expected %d", buf.Len(), assertLen)
- }
+ require.Equal(b, assertLen, buf.Len())
}
// Run sanity checks on last produced SST.
@@ -2201,17 +1841,13 @@ func runMVCCExportToSST(b *testing.B, opts mvccExportToSSTOpts) {
KeyTypes: IterKeyTypePointsAndRanges,
},
)
- if err != nil {
- b.Fatal(err)
- }
+	require.NoError(b, err)
	it.SeekGE(MakeMVCCMetadataKey(roachpb.LocalMax))
var n int // points
var r int // range keys (within stacks)
for {
ok, err := it.Valid()
- if err != nil {
- b.Fatal(err)
- }
+ require.NoError(b, err)
if !ok {
break
}
@@ -2224,16 +1860,13 @@ func runMVCCExportToSST(b *testing.B, opts mvccExportToSSTOpts) {
}
it.Next()
}
- if expKVsInSST != n {
- b.Fatalf("unexpected number of keys in SST: %d, expected %d", n, expKVsInSST)
- }
+ require.Equal(b, expKVsInSST, n)
// Should not see any rangedel stacks if startTS is set.
if opts.numRangeKeys > 0 && startWall == 0 && opts.exportAllRevisions {
- if r < opts.numRangeKeys {
- b.Fatalf("unexpected number of range keys in SST: %d, expected at least %d", r, opts.numRangeKeys)
- }
- } else if r != 0 {
- b.Fatalf("unexpected number of range keys in SST: %d, expected 0", r)
+ require.NotZero(b, r)
+ require.GreaterOrEqual(b, r, opts.numRangeKeys)
+ } else {
+ require.Zero(b, r)
}
}
@@ -2297,9 +1930,7 @@ func runCheckSSTConflicts(
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := CheckSSTConflicts(context.Background(), sstFile.Data(), eng, sstStart, sstEnd, sstStart.Key, sstEnd.Key.Next(), false, hlc.Timestamp{}, hlc.Timestamp{}, math.MaxInt64, 0, usePrefixSeek)
- if err != nil {
- b.Fatal(err)
- }
+ require.NoError(b, err)
}
}
@@ -2527,513 +2158,3 @@ func BenchmarkMVCCScannerWithIntentsAndVersions(b *testing.B) {
ro.Close()
}
}
-
-func BenchmarkMVCCBlindPut(b *testing.B) {
- defer log.Scope(b).Close(b)
-
- valueSizes := []int{10, 100, 1000, 10000}
- if testing.Short() {
- valueSizes = []int{10, 10000}
- }
-
- for _, valueSize := range valueSizes {
- b.Run(fmt.Sprintf("valueSize=%d", valueSize), func(b *testing.B) {
- ctx := context.Background()
- runMVCCBlindPut(ctx, b, setupMVCCInMemPebble, valueSize)
- })
- }
-}
-
-func BenchmarkMVCCConditionalPut(b *testing.B) {
- defer log.Scope(b).Close(b)
-
- valueSizes := []int{10, 100, 1000, 10000}
- if testing.Short() {
- valueSizes = []int{10, 10000}
- }
-
- for _, createFirst := range []bool{false, true} {
- prefix := "Create"
- if createFirst {
- prefix = "Replace"
- }
- b.Run(prefix, func(b *testing.B) {
- for _, valueSize := range valueSizes {
- b.Run(fmt.Sprintf("valueSize=%d", valueSize), func(b *testing.B) {
- ctx := context.Background()
- runMVCCConditionalPut(ctx, b, setupMVCCInMemPebble, valueSize, createFirst)
- })
- }
- })
- }
-}
-
-func BenchmarkMVCCBlindConditionalPut(b *testing.B) {
- defer log.Scope(b).Close(b)
-
- valueSizes := []int{10, 100, 1000, 10000}
- if testing.Short() {
- valueSizes = []int{10, 10000}
- }
-
- for _, valueSize := range valueSizes {
- b.Run(fmt.Sprintf("valueSize=%d", valueSize), func(b *testing.B) {
- ctx := context.Background()
- runMVCCBlindConditionalPut(ctx, b, setupMVCCInMemPebble, valueSize)
- })
- }
-}
-
-func BenchmarkMVCCInitPut(b *testing.B) {
- defer log.Scope(b).Close(b)
-
- valueSizes := []int{10, 100, 1000, 10000}
- if testing.Short() {
- valueSizes = []int{10, 10000}
- }
-
- for _, valueSize := range valueSizes {
- b.Run(fmt.Sprintf("valueSize=%d", valueSize), func(b *testing.B) {
- ctx := context.Background()
- runMVCCInitPut(ctx, b, setupMVCCInMemPebble, valueSize)
- })
- }
-}
-
-func BenchmarkMVCCBlindInitPut(b *testing.B) {
- defer log.Scope(b).Close(b)
-
- valueSizes := []int{10, 100, 1000, 10000}
- if testing.Short() {
- valueSizes = []int{10, 10000}
- }
-
- for _, valueSize := range valueSizes {
- b.Run(fmt.Sprintf("valueSize=%d", valueSize), func(b *testing.B) {
- ctx := context.Background()
- runMVCCBlindInitPut(ctx, b, setupMVCCInMemPebble, valueSize)
- })
- }
-}
-
-func BenchmarkMVCCPutDelete(b *testing.B) {
- defer log.Scope(b).Close(b)
- ctx := context.Background()
- db := setupMVCCInMemPebble(b, "put_delete")
- defer db.Close()
-
- r := rand.New(rand.NewSource(timeutil.Now().UnixNano()))
- value := roachpb.MakeValueFromBytes(randutil.RandBytes(r, 10))
- var blockNum int64
-
- for i := 0; i < b.N; i++ {
- blockID := r.Int63()
- blockNum++
- key := encoding.EncodeVarintAscending(nil, blockID)
- key = encoding.EncodeVarintAscending(key, blockNum)
-
- if _, err := MVCCPut(ctx, db, key, hlc.Timestamp{}, value, MVCCWriteOptions{}); err != nil {
- b.Fatal(err)
- }
- if _, _, err := MVCCDelete(ctx, db, key, hlc.Timestamp{}, MVCCWriteOptions{}); err != nil {
- b.Fatal(err)
- }
- }
-}
-
-func BenchmarkMVCCBatchPut(b *testing.B) {
- defer log.Scope(b).Close(b)
-
- batchSizes := []int{10, 100, 1000, 10000}
- if testing.Short() {
- batchSizes = []int{10, 10000}
- }
-
- for _, valueSize := range []int{10} {
- b.Run(fmt.Sprintf("valueSize=%d", valueSize), func(b *testing.B) {
- for _, batchSize := range batchSizes {
- b.Run(fmt.Sprintf("batchSize=%d", batchSize), func(b *testing.B) {
- ctx := context.Background()
- runMVCCBatchPut(ctx, b, setupMVCCInMemPebble, valueSize, batchSize)
- })
- }
- })
- }
-}
-
-func BenchmarkMVCCBatchTimeSeries(b *testing.B) {
- defer log.Scope(b).Close(b)
- ctx := context.Background()
- for _, batchSize := range []int{282} {
- b.Run(fmt.Sprintf("batchSize=%d", batchSize), func(b *testing.B) {
- runMVCCBatchTimeSeries(ctx, b, setupMVCCInMemPebble, batchSize)
- })
- }
-}
-
-// BenchmarkMVCCGetMergedTimeSeries computes performance of reading merged
-// time series data using `MVCCGet()`. Uses an in-memory engine.
-func BenchmarkMVCCGetMergedTimeSeries(b *testing.B) {
- defer log.Scope(b).Close(b)
-
- type testCase struct {
- numKeys int
- mergesPerKey int
- }
- var testCases []testCase
- for _, numKeys := range []int{1, 16, 256} {
- for _, mergesPerKey := range []int{1, 16, 256} {
- testCases = append(testCases, testCase{
- numKeys: numKeys,
- mergesPerKey: mergesPerKey,
- })
- }
- }
-
- if testing.Short() {
- // Choose a configuration for the short version.
- testCases = []testCase{
- {numKeys: 16, mergesPerKey: 16},
- }
- }
-
- for _, tc := range testCases {
- name := fmt.Sprintf("numKeys=%d/mergesPerKey=%d", tc.numKeys, tc.mergesPerKey)
- b.Run(name, func(b *testing.B) {
- ctx := context.Background()
- runMVCCGetMergedValue(ctx, b, setupMVCCInMemPebble, tc.numKeys, tc.mergesPerKey)
- })
- }
-}
-
-// DeleteRange benchmarks below (using on-disk data).
-//
-// TODO(peter): Benchmark{MVCCDeleteRange,ClearRange,ClearIterRange}
-// give nonsensical results (DeleteRange is absurdly slow and ClearRange
-// reports a processing speed of 481 million MB/s!). We need to take a look at
-// what these benchmarks are trying to measure, and fix them.
-
-func BenchmarkMVCCDeleteRange(b *testing.B) {
- // TODO(radu): run one configuration under Short once the above TODO is
- // resolved.
- skip.UnderShort(b)
- defer log.Scope(b).Close(b)
- ctx := context.Background()
- for _, valueSize := range []int{8, 32, 256} {
- b.Run(fmt.Sprintf("valueSize=%d", valueSize), func(b *testing.B) {
- runMVCCDeleteRange(ctx, b, valueSize)
- })
- }
-}
-
-func BenchmarkMVCCDeleteRangeUsingTombstone(b *testing.B) {
- // TODO(radu): run one configuration under Short once the above TODO is
- // resolved.
- skip.UnderShort(b)
- defer log.Scope(b).Close(b)
- ctx := context.Background()
- for _, numKeys := range []int{1000, 10000, 100000} {
- b.Run(fmt.Sprintf("numKeys=%d", numKeys), func(b *testing.B) {
- for _, valueSize := range []int{64} {
- b.Run(fmt.Sprintf("valueSize=%d", valueSize), func(b *testing.B) {
- for _, entireRange := range []bool{false, true} {
- b.Run(fmt.Sprintf("entireRange=%t", entireRange), func(b *testing.B) {
- runMVCCDeleteRangeUsingTombstone(ctx, b, numKeys, valueSize, entireRange)
- })
- }
- })
- }
- })
- }
-}
-
-// BenchmarkMVCCDeleteRangeWithPredicate benchmarks predicate based
-// delete range under certain configs. A lower streak bound simulates sequential
-// imports with more interspersed keys, leading to fewer range tombstones and
-// more point tombstones.
-func BenchmarkMVCCDeleteRangeWithPredicate(b *testing.B) {
- // TODO(radu): run one configuration under Short once the above TODO is
- // resolved.
- skip.UnderShort(b)
- defer log.Scope(b).Close(b)
- ctx := context.Background()
- for _, streakBound := range []int{10, 100, 200, 500} {
- b.Run(fmt.Sprintf("streakBound=%d", streakBound), func(b *testing.B) {
- for _, rangeKeyThreshold := range []int64{64} {
- b.Run(fmt.Sprintf("rangeKeyThreshold=%d", rangeKeyThreshold), func(b *testing.B) {
- config := mvccImportedData{
- streakBound: streakBound,
- keyCount: 2000,
- valueBytes: 64,
- layers: 2,
- }
- runMVCCDeleteRangeWithPredicate(ctx, b, config, 0, rangeKeyThreshold)
- })
- }
- })
- }
-}
-
-func BenchmarkClearMVCCVersions(b *testing.B) {
- // TODO(radu): run one configuration under Short once the above TODO is
- // resolved.
- skip.UnderShort(b)
- defer log.Scope(b).Close(b)
- ctx := context.Background()
- runClearRange(ctx, b, func(eng Engine, batch Batch, start, end MVCCKey) error {
- return batch.ClearMVCCVersions(start, end)
- })
-}
-
-func BenchmarkClearMVCCIteratorRange(b *testing.B) {
- ctx := context.Background()
- defer log.Scope(b).Close(b)
- runClearRange(ctx, b, func(eng Engine, batch Batch, start, end MVCCKey) error {
- return batch.ClearMVCCIteratorRange(start.Key, end.Key, true, true)
- })
-}
-
-func BenchmarkBatchApplyBatchRepr(b *testing.B) {
- defer log.Scope(b).Close(b)
-
- type testCase struct {
- indexed bool
- sequential bool
- valueSize int
- batchSize int
- }
- var testCases []testCase
-
- for _, indexed := range []bool{false, true} {
- for _, sequential := range []bool{false, true} {
- for _, valueSize := range []int{10} {
- for _, batchSize := range []int{10000} {
- testCases = append(testCases, testCase{
- indexed: indexed,
- sequential: sequential,
- valueSize: valueSize,
- batchSize: batchSize,
- })
- }
- }
- }
- }
-
- if testing.Short() {
- // Choose a configuration for the short version.
- testCases = []testCase{
- {indexed: true, sequential: false, valueSize: 10, batchSize: 8},
- }
- }
-
- for _, tc := range testCases {
- name := fmt.Sprintf(
- "indexed=%t/seq=%t/valueSize=%d/batchSize=%d",
- tc.indexed, tc.sequential, tc.valueSize, tc.batchSize,
- )
-
- b.Run(name, func(b *testing.B) {
- ctx := context.Background()
- runBatchApplyBatchRepr(ctx, b, setupMVCCInMemPebble,
- tc.indexed, tc.sequential, tc.valueSize, tc.batchSize)
- })
- }
-}
-
-type acquireLockTestCase struct {
- batch bool
- heldOtherTxn bool
- heldSameTxn bool
- strength lock.Strength
-}
-
-func (tc acquireLockTestCase) name() string {
- return fmt.Sprintf(
- "batch=%t/heldOtherTxn=%t/heldSameTxn=%t/strength=%s",
- tc.batch, tc.heldOtherTxn, tc.heldSameTxn, tc.strength,
- )
-}
-
-func acquireLockTestCases() []acquireLockTestCase {
- var res []acquireLockTestCase
- for _, batch := range []bool{false, true} {
- for _, heldOtherTxn := range []bool{false, true} {
- for _, heldSameTxn := range []bool{false, true} {
- if heldOtherTxn && heldSameTxn {
- continue // not possible
- }
- for _, strength := range []lock.Strength{lock.Shared, lock.Exclusive} {
- res = append(res, acquireLockTestCase{
- batch: batch,
- heldOtherTxn: heldOtherTxn,
- heldSameTxn: heldSameTxn,
- strength: strength,
- })
- }
- }
- }
- }
- return res
-}
-
-func BenchmarkMVCCCheckForAcquireLock(b *testing.B) {
- defer log.Scope(b).Close(b)
-
- for _, tc := range acquireLockTestCases() {
- b.Run(tc.name(), func(b *testing.B) {
- ctx := context.Background()
- runMVCCCheckForAcquireLock(ctx, b, setupMVCCInMemPebble, tc.batch, tc.heldOtherTxn, tc.heldSameTxn, tc.strength)
- })
- }
-}
-
-func BenchmarkMVCCAcquireLock(b *testing.B) {
- defer log.Scope(b).Close(b)
-
- for _, tc := range acquireLockTestCases() {
- b.Run(tc.name(), func(b *testing.B) {
- ctx := context.Background()
- runMVCCAcquireLock(ctx, b, setupMVCCInMemPebble, tc.batch, tc.heldOtherTxn, tc.heldSameTxn, tc.strength)
- })
- }
-}
-
-func BenchmarkBatchBuilderPut(b *testing.B) {
- defer log.Scope(b).Close(b)
- value := make([]byte, 10)
- for i := range value {
- value[i] = byte(i)
- }
- keyBuf := append(make([]byte, 0, 64), []byte("key-")...)
-
- eng := setupMVCCInMemPebble(b, "")
- defer eng.Close()
- batch := eng.NewBatch()
-
- b.ResetTimer()
-
- const batchSize = 1000
- for i := 0; i < b.N; i += batchSize {
- end := i + batchSize
- if end > b.N {
- end = b.N
- }
-
- for j := i; j < end; j++ {
- key := roachpb.Key(encoding.EncodeUvarintAscending(keyBuf[:4], uint64(j)))
- ts := hlc.Timestamp{WallTime: int64(j + 1)} // j+1 to avoid zero timestamp
- err := batch.PutMVCC(MVCCKey{key, ts}, MVCCValue{Value: roachpb.MakeValueFromBytes(value)})
- if err != nil {
- b.Fatal(err)
- }
- }
- batch.Close()
- batch = eng.NewBatch()
- }
-
- b.StopTimer()
-}
-
-func BenchmarkCheckSSTConflicts(b *testing.B) {
- defer log.Scope(b).Close(b)
-
- type testCase struct {
- numKeys int
- numSSTKeys int
- overlap bool
- usePrefixSeek bool
- }
- var testCases []testCase
-
- for _, numKeys := range []int{1000, 10000, 100000} {
- for _, numSSTKeys := range []int{10, 100, 1000, 10000, 100000} {
- for _, overlap := range []bool{false, true} {
- for _, usePrefixSeek := range []bool{false, true} {
- testCases = append(testCases, testCase{
- numKeys: numKeys,
- numSSTKeys: numSSTKeys,
- overlap: overlap,
- usePrefixSeek: usePrefixSeek,
- })
- }
- }
- }
- }
-
- if testing.Short() {
- // Choose a few configurations for the short version.
- testCases = []testCase{
- {numKeys: 10000, numSSTKeys: 100, overlap: false, usePrefixSeek: false},
- {numKeys: 10000, numSSTKeys: 1000, overlap: true, usePrefixSeek: true},
- }
- }
-
- for _, tc := range testCases {
- name := fmt.Sprintf(
- "keys=%d/sstKeys=%d/overlap=%t/usePrefixSeek=%v",
- tc.numKeys, tc.numSSTKeys, tc.overlap, tc.usePrefixSeek,
- )
- b.Run(name, func(b *testing.B) {
- runCheckSSTConflicts(b, tc.numKeys, 1 /* numVersions */, tc.numSSTKeys, tc.overlap, tc.usePrefixSeek)
- })
- }
-}
-
-func BenchmarkSSTIterator(b *testing.B) {
- defer log.Scope(b).Close(b)
-
- for _, numKeys := range []int{1, 100, 10000} {
- b.Run(fmt.Sprintf("keys=%d", numKeys), func(b *testing.B) {
- for _, verify := range []bool{false, true} {
- b.Run(fmt.Sprintf("verify=%t", verify), func(b *testing.B) {
- runSSTIterator(b, numKeys, verify)
- })
- }
- })
- }
-}
-
-const testCacheSize = 1 << 30 // 1 GB
-
-func setupMVCCPebble(b testing.TB, dir string) Engine {
- peb, err := Open(
- context.Background(),
- fs.MustInitPhysicalTestingEnv(dir),
- cluster.MakeTestingClusterSettings(),
- CacheSize(testCacheSize))
- if err != nil {
- b.Fatalf("could not create new pebble instance at %s: %+v", dir, err)
- }
- return peb
-}
-
-func setupMVCCInMemPebble(b testing.TB, loc string) Engine {
- return setupMVCCInMemPebbleWithSeparatedIntents(b)
-}
-
-func setupMVCCInMemPebbleWithSeparatedIntents(b testing.TB) Engine {
- peb, err := Open(
- context.Background(),
- InMemory(),
- cluster.MakeClusterSettings(),
- CacheSize(testCacheSize))
- if err != nil {
- b.Fatalf("could not create new in-mem pebble instance: %+v", err)
- }
- return peb
-}
-
-func setupPebbleInMemPebbleForLatestRelease(b testing.TB, _ string) Engine {
- ctx := context.Background()
- s := cluster.MakeClusterSettings()
- if err := clusterversion.Initialize(ctx, clusterversion.Latest.Version(),
- &s.SV); err != nil {
- b.Fatalf("failed to set current cluster version: %+v", err)
- }
-
- peb, err := Open(ctx, InMemory(), s, CacheSize(testCacheSize))
- if err != nil {
- b.Fatalf("could not create new in-mem pebble instance: %+v", err)
- }
- return peb
-}
diff --git a/pkg/storage/engine_key_test.go b/pkg/storage/engine_key_test.go
index 6077e5f08df9..87f0e018f796 100644
--- a/pkg/storage/engine_key_test.go
+++ b/pkg/storage/engine_key_test.go
@@ -489,26 +489,3 @@ func engineKey(key string, ts int) EngineKey {
Version: encodeMVCCTimestamp(wallTS(ts)),
}
}
-
-var possibleVersionLens = []int{
- engineKeyNoVersion,
- engineKeyVersionWallTimeLen,
- engineKeyVersionWallAndLogicalTimeLen,
- engineKeyVersionWallLogicalAndSyntheticTimeLen,
- engineKeyVersionLockTableLen,
-}
-
-func randomSerializedEngineKey(r *rand.Rand, maxUserKeyLen int) []byte {
- userKeyLen := randutil.RandIntInRange(r, 1, maxUserKeyLen)
- versionLen := possibleVersionLens[r.Intn(len(possibleVersionLens))]
- serializedLen := userKeyLen + versionLen + 1
- if versionLen > 0 {
- serializedLen++ // sentinel
- }
- k := randutil.RandBytes(r, serializedLen)
- k[userKeyLen] = 0x00
- if versionLen > 0 {
- k[len(k)-1] = byte(versionLen + 1)
- }
- return k
-}
diff --git a/pkg/storage/mvcc.go b/pkg/storage/mvcc.go
index bba040528197..e91bc761802a 100644
--- a/pkg/storage/mvcc.go
+++ b/pkg/storage/mvcc.go
@@ -3116,7 +3116,7 @@ func MVCCInitPut(
// confusing and redundant. See the comment on mvccPutInternal for details.
func MVCCBlindInitPut(
ctx context.Context,
- w Writer,
+ rw ReadWriter,
key roachpb.Key,
timestamp hlc.Timestamp,
value roachpb.Value,
@@ -3124,12 +3124,12 @@ func MVCCBlindInitPut(
opts MVCCWriteOptions,
) (roachpb.LockAcquisition, error) {
return mvccInitPutUsingIter(
- ctx, w, nil, nil, key, timestamp, value, failOnTombstones, opts)
+ ctx, rw, nil, nil, key, timestamp, value, failOnTombstones, opts)
}
func mvccInitPutUsingIter(
ctx context.Context,
- w Writer,
+ rw ReadWriter,
iter MVCCIterator,
ltScanner *lockTableKeyScanner,
key roachpb.Key,
@@ -3153,7 +3153,7 @@ func mvccInitPutUsingIter(
}
return value, nil
}
- return mvccPutUsingIter(ctx, w, iter, ltScanner, key, timestamp, noValue, valueFn, opts)
+ return mvccPutUsingIter(ctx, rw, iter, ltScanner, key, timestamp, noValue, valueFn, opts)
}
// mvccKeyFormatter is an fmt.Formatter for MVCC Keys.
diff --git a/pkg/storage/mvcc_value.go b/pkg/storage/mvcc_value.go
index 2e2ec9b802ad..fcf545c2591e 100644
--- a/pkg/storage/mvcc_value.go
+++ b/pkg/storage/mvcc_value.go
@@ -12,7 +12,6 @@ import (
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/storage/enginepb"
- "github.com/cockroachdb/cockroach/pkg/util/buildutil"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/metamorphic"
"github.com/cockroachdb/errors"
@@ -210,9 +209,6 @@ func EncodeMVCCValueToBuf(v MVCCValue, buf []byte) ([]byte, bool, error) {
return v.Value.RawBytes, false, nil
}
- // NB: This code is duplicated in encodeExtendedMVCCValueToSizedBuf and
- // edits should be replicated there.
-
// Extended encoding. Wrap the roachpb.Value encoding with a header containing
// MVCC-level metadata. Requires a re-allocation and copy.
headerLen := v.MVCCValueHeader.Size()
@@ -243,38 +239,6 @@ func EncodeMVCCValueToBuf(v MVCCValue, buf []byte) ([]byte, bool, error) {
return buf, true, nil
}
-func mvccValueSize(v MVCCValue) (size int, extendedEncoding bool) {
- if v.MVCCValueHeader.IsEmpty() && !disableSimpleValueEncoding {
- return len(v.Value.RawBytes), false
- }
- return extendedPreludeSize + v.MVCCValueHeader.Size() + len(v.Value.RawBytes), true
-}
-
-// encodeExtendedMVCCValueToSizedBuf encodes an MVCCValue into its encoded form
-// in the provided buffer. The provided buf must be exactly sized, matching the
-// value returned by MVCCValue.encodedMVCCValueSize.
-//
-// See EncodeMVCCValueToBuf for detailed comments on the encoding scheme.
-func encodeExtendedMVCCValueToSizedBuf(v MVCCValue, buf []byte) error {
- if buildutil.CrdbTestBuild {
- if sz := encodedMVCCValueSize(v); sz != len(buf) {
- panic(errors.AssertionFailedf("provided buf (len=%d) is not sized correctly; expected %d", len(buf), sz))
- }
- }
- headerSize := len(buf) - len(v.Value.RawBytes)
- headerLen := headerSize - extendedPreludeSize
- binary.BigEndian.PutUint32(buf, uint32(headerLen))
- buf[tagPos] = extendedEncodingSentinel
- if _, err := v.MVCCValueHeader.MarshalToSizedBuffer(buf[extendedPreludeSize:headerSize]); err != nil {
- return errors.Wrap(err, "marshaling MVCCValueHeader")
- }
- if buildutil.CrdbTestBuild && len(buf[headerSize:]) != len(v.Value.RawBytes) {
- panic(errors.AssertionFailedf("insufficient space for raw value; expected %d, got %d", len(v.Value.RawBytes), len(buf[headerSize:])))
- }
- copy(buf[headerSize:], v.Value.RawBytes)
- return nil
-}
-
// DecodeMVCCValue decodes an MVCCKey from its Pebble representation.
//
// NOTE: this function does not inline, so it is not suitable for performance
diff --git a/pkg/storage/pebble.go b/pkg/storage/pebble.go
index ecfbffe286f0..3847dd316260 100644
--- a/pkg/storage/pebble.go
+++ b/pkg/storage/pebble.go
@@ -102,14 +102,15 @@ var IngestSplitEnabled = settings.RegisterBoolSetting(
settings.WithPublic,
)
+// TODO(radu): re-enable this setting when we are confident in the implementation.
// columnarBlocksEnabled controls whether columnar-blocks are enabled in Pebble.
-var columnarBlocksEnabled = settings.RegisterBoolSetting(
- settings.SystemVisible,
- "storage.columnar_blocks.enabled",
- "set to true to enable columnar-blocks to store KVs in a columnar format",
- false, // TODO(jackson): Metamorphicize this.
- settings.WithPublic,
-)
+//var columnarBlocksEnabled = settings.RegisterBoolSetting(
+// settings.SystemVisible,
+// "storage.columnar_blocks.enabled",
+// "set to true to enable columnar-blocks to store KVs in a columnar format",
+// false, // TODO(jackson): Metamorphicize this.
+// settings.WithPublic,
+//)
// deleteCompactionsCanExcise controls whether delete compactions can
// apply rangedels/rangekeydels on sstables they partially apply to, through
@@ -406,11 +407,9 @@ func ShouldUseEFOS(settings *settings.Values) bool {
return UseEFOS.Get(settings) || UseExciseForSnapshots.Get(settings)
}
-// EngineRangeSuffixCompare implements pebble.Comparer.CompareRangeSuffixes. It
-// compares cockroach suffixes (which are composed of the version and a trailing
-// sentinel byte); the version can be an MVCC timestamp or a lock key. It is
-// more strict than EnginePointSuffixCompare due to historical reasons; see
-// https://github.com/cockroachdb/cockroach/issues/130533
+// EngineRangeSuffixCompare implements pebble.Comparer.CompareRangeSuffixes. It compares
+// cockroach suffixes (which are composed of the version and a trailing sentinel
+// byte); the version can be an MVCC timestamp or a lock key.
func EngineRangeSuffixCompare(a, b []byte) int {
if len(a) == 0 || len(b) == 0 {
// Empty suffixes sort before non-empty suffixes.
@@ -622,8 +621,9 @@ var EngineComparer = &pebble.Comparer{
Split: EngineKeySplit,
CompareRangeSuffixes: EngineRangeSuffixCompare,
ComparePointSuffixes: EnginePointSuffixCompare,
- Compare: EngineKeyCompare,
- Equal: EngineKeyEqual,
+
+ Compare: EngineKeyCompare,
+ Equal: EngineKeyEqual,
AbbreviatedKey: func(k []byte) uint64 {
key, ok := GetKeyPartFromEngineKey(k)
@@ -1241,7 +1241,9 @@ func newPebble(ctx context.Context, cfg engineConfig) (p *Pebble, err error) {
return IngestSplitEnabled.Get(&cfg.settings.SV)
}
cfg.opts.Experimental.EnableColumnarBlocks = func() bool {
- return columnarBlocksEnabled.Get(&cfg.settings.SV)
+ // TODO(radu): re-enable this setting when we are confident in the implementation.
+ // return columnarBlocksEnabled.Get(&cfg.settings.SV)
+ return false
}
cfg.opts.Experimental.EnableDeleteOnlyCompactionExcises = func() bool {
return deleteCompactionsCanExcise.Get(&cfg.settings.SV)
diff --git a/pkg/storage/pebble_batch.go b/pkg/storage/pebble_batch.go
index 930204c00d64..8d29ef951633 100644
--- a/pkg/storage/pebble_batch.go
+++ b/pkg/storage/pebble_batch.go
@@ -273,7 +273,11 @@ func (wb *writeBatch) PutMVCC(key MVCCKey, value MVCCValue) error {
if key.Timestamp.IsEmpty() {
panic("PutMVCC timestamp is empty")
}
- return wb.putMVCC(key, value)
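+	// Encode the MVCCValue up front and reuse the generic put path below. This
+	// replaces the deferred-operation fast path (which encoded directly into
+	// batch-owned memory) with a simpler encode-then-Set at the cost of a copy.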
+ encValue, err := EncodeMVCCValue(value)
+ if err != nil {
+ return err
+ }
+ return wb.put(key, encValue)
}
// PutRawMVCC implements the Writer interface.
@@ -298,41 +302,13 @@ func (wb *writeBatch) PutEngineKey(key EngineKey, value []byte) error {
return wb.batch.Set(wb.buf, value, nil)
}
-func (wb *writeBatch) putMVCC(key MVCCKey, value MVCCValue) error {
- // For performance, this method uses the pebble Batch's deferred operation
- // API to avoid an extra memcpy. We:
- // - determine the length of the encoded MVCC key and MVCC value
- // - reserve space in the pebble Batch using SetDeferred
- // - encode the MVCC key and MVCC value directly into the Batch
- // - call Finish on the deferred operation (which will index the key if
- // wb.batch is indexed)
- valueLen, isExtended := mvccValueSize(value)
- keyLen := encodedMVCCKeyLength(key)
- o := wb.batch.SetDeferred(keyLen, valueLen)
- encodeMVCCKeyToBuf(o.Key, key, keyLen)
- if !isExtended {
- // Fast path; we don't need to use the extended encoding and can copy
- // RawBytes in verbatim.
- copy(o.Value, value.Value.RawBytes)
- } else {
- // Slow path; we need the MVCC value header.
- err := encodeExtendedMVCCValueToSizedBuf(value, o.Value)
- if err != nil {
- return err
- }
- }
- return o.Finish()
-}
-
func (wb *writeBatch) put(key MVCCKey, value []byte) error {
if len(key.Key) == 0 {
return emptyKeyError()
}
- keyLen := encodedMVCCKeyLength(key)
- o := wb.batch.SetDeferred(keyLen, len(value))
- encodeMVCCKeyToBuf(o.Key, key, keyLen)
- copy(o.Value, value)
- return o.Finish()
+
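+	// Pebble's Batch.Set copies the key and value into the batch, so wb.buf is
+	// safe to reuse as scratch space for the encoded key on the next call.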
+ wb.buf = EncodeMVCCKeyToBuf(wb.buf[:0], key)
+ return wb.batch.Set(wb.buf, value, nil)
}
// LogData implements the Writer interface.
diff --git a/pkg/storage/pebble_key_schema.go b/pkg/storage/pebble_key_schema.go
index 9f6e01889378..763a3d9e6bb0 100644
--- a/pkg/storage/pebble_key_schema.go
+++ b/pkg/storage/pebble_key_schema.go
@@ -11,7 +11,6 @@ import (
"encoding/binary"
"fmt"
"io"
- "strings"
"unsafe"
"github.com/cockroachdb/cockroach/pkg/util/buildutil"
@@ -39,8 +38,7 @@ const (
)
var keySchema = colblk.KeySchema{
- Name: "crdb1",
- HeaderSize: 1,
+ Name: "crdb1",
ColumnTypes: []colblk.DataType{
cockroachColRoachKey: colblk.DataTypePrefixBytes,
cockroachColMVCCWallTime: colblk.DataTypeUint,
@@ -59,44 +57,11 @@ var keySchema = colblk.KeySchema{
},
}
-// suffixTypes is a bitfield indicating what kind of suffixes are present in a
-// block.
-type suffixTypes uint8
-
-const (
- // hasMVCCSuffixes is set if there is at least one key with an MVCC suffix in
- // the block.
- hasMVCCSuffixes suffixTypes = (1 << iota)
- // hasEmptySuffixes is set if there is at least one key with no suffix in the block.
- hasEmptySuffixes
- // hasNonMVCCSuffixes is set if there is at least one key with a non-empty,
- // non-MVCC suffix.
- hasNonMVCCSuffixes
-)
-
-func (s suffixTypes) String() string {
- var suffixes []string
- if s&hasMVCCSuffixes != 0 {
- suffixes = append(suffixes, "mvcc")
- }
- if s&hasEmptySuffixes != 0 {
- suffixes = append(suffixes, "empty")
- }
- if s&hasNonMVCCSuffixes != 0 {
- suffixes = append(suffixes, "non-mvcc")
- }
- if len(suffixes) == 0 {
- return "none"
- }
- return strings.Join(suffixes, ",")
-}
-
type cockroachKeyWriter struct {
roachKeys colblk.PrefixBytesBuilder
wallTimes colblk.UintBuilder
logicalTimes colblk.UintBuilder
untypedVersions colblk.RawBytesBuilder
- suffixTypes suffixTypes
prevSuffix []byte
}
@@ -112,14 +77,6 @@ func makeCockroachKeyWriter() *cockroachKeyWriter {
return kw
}
-func (kw *cockroachKeyWriter) Reset() {
- kw.roachKeys.Reset()
- kw.wallTimes.Reset()
- kw.logicalTimes.Reset()
- kw.untypedVersions.Reset()
- kw.suffixTypes = 0
-}
-
func (kw *cockroachKeyWriter) ComparePrev(key []byte) colblk.KeyComparison {
var cmpv colblk.KeyComparison
cmpv.PrefixLen = int32(EngineKeySplit(key)) // TODO(jackson): Inline
@@ -177,20 +134,16 @@ func (kw *cockroachKeyWriter) WriteKey(
switch versionLen {
case 0:
// No-op.
- kw.suffixTypes |= hasEmptySuffixes
case 9:
- kw.suffixTypes |= hasMVCCSuffixes
wallTime = binary.BigEndian.Uint64(key[keyPrefixLen : keyPrefixLen+8])
case 13, 14:
- kw.suffixTypes |= hasMVCCSuffixes
wallTime = binary.BigEndian.Uint64(key[keyPrefixLen : keyPrefixLen+8])
kw.logicalTimes.Set(row, uint64(binary.BigEndian.Uint32(key[keyPrefixLen+8:keyPrefixLen+12])))
// NOTE: byte 13 used to store the timestamp's synthetic bit, but this is no
// longer consulted and can be ignored during decoding.
default:
// Not a MVCC timestamp.
- kw.suffixTypes |= hasNonMVCCSuffixes
- untypedVersion = key[keyPrefixLen : len(key)-1]
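+		// Store the untyped version verbatim, including the trailing sentinel
+		// length byte, so MaterializeKey can append it back unchanged.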
+ untypedVersion = key[keyPrefixLen:]
}
kw.wallTimes.Set(row, wallTime)
kw.untypedVersions.Put(untypedVersion)
@@ -202,7 +155,6 @@ func (kw *cockroachKeyWriter) MaterializeKey(dst []byte, i int) []byte {
dst = append(dst, 0)
if untypedVersion := kw.untypedVersions.UnsafeGet(i); len(untypedVersion) > 0 {
dst = append(dst, untypedVersion...)
- dst = append(dst, byte(len(untypedVersion)+1))
return dst
}
wall := kw.wallTimes.Get(i)
@@ -223,6 +175,13 @@ func (kw *cockroachKeyWriter) MaterializeKey(dst []byte, i int) []byte {
return dst
}
+func (kw *cockroachKeyWriter) Reset() {
+ kw.roachKeys.Reset()
+ kw.wallTimes.Reset()
+ kw.logicalTimes.Reset()
+ kw.untypedVersions.Reset()
+}
+
func (kw *cockroachKeyWriter) WriteDebug(dst io.Writer, rows int) {
fmt.Fprint(dst, "prefixes: ")
kw.roachKeys.WriteDebug(dst, rows)
@@ -236,8 +195,6 @@ func (kw *cockroachKeyWriter) WriteDebug(dst io.Writer, rows int) {
fmt.Fprint(dst, "untyped suffixes: ")
kw.untypedVersions.WriteDebug(dst, rows)
fmt.Fprintln(dst)
- fmt.Fprint(dst, "suffix types: ")
- fmt.Fprintln(dst, kw.suffixTypes.String())
}
func (kw *cockroachKeyWriter) NumColumns() int {
@@ -273,9 +230,7 @@ func (kw *cockroachKeyWriter) Finish(
}
}
-func (kw *cockroachKeyWriter) FinishHeader(buf []byte) {
- buf[0] = byte(kw.suffixTypes)
-}
+func (kw *cockroachKeyWriter) FinishHeader(buf []byte) {}
type cockroachKeySeeker struct {
roachKeys colblk.PrefixBytes
@@ -283,7 +238,6 @@ type cockroachKeySeeker struct {
mvccWallTimes colblk.UnsafeUints
mvccLogical colblk.UnsafeUints
untypedVersions colblk.RawBytes
- suffixTypes suffixTypes
}
// Assert that the cockroachKeySeeker fits inside KeySeekerMetadata.
@@ -298,14 +252,12 @@ func (ks *cockroachKeySeeker) init(d *colblk.DataBlockDecoder) {
ks.mvccWallTimes = bd.Uints(cockroachColMVCCWallTime)
ks.mvccLogical = bd.Uints(cockroachColMVCCLogical)
ks.untypedVersions = bd.RawBytes(cockroachColUntypedVersion)
- header := d.KeySchemaHeader()
- if len(header) != 1 {
- panic(errors.AssertionFailedf("invalid key schema-specific header %x", header))
- }
- ks.suffixTypes = suffixTypes(header[0])
}
-// IsLowerBound is part of the KeySeeker interface.
+// IsLowerBound compares the provided key to the first user key
+// contained within the data block. It's equivalent to performing
+//
+// Compare(firstUserKey, k) >= 0
func (ks *cockroachKeySeeker) IsLowerBound(k []byte, syntheticSuffix []byte) bool {
ek, ok := DecodeEngineKey(k)
if !ok {
@@ -367,117 +319,69 @@ func (ks *cockroachKeySeeker) SeekGE(
// with the same prefix as index and a suffix greater than or equal to [suffix],
// or if no such row exists, the next row with a different prefix.
func (ks *cockroachKeySeeker) seekGEOnSuffix(index int, seekSuffix []byte) (row int) {
- // We have three common cases:
- // 1. The seek key has no suffix.
- // 2. We are seeking to an MVCC timestamp in a block where all keys have
- // MVCC timestamps (e.g. SQL table data).
- // 3. We are seeking to a non-MVCC timestamp in a block where no keys have
- // MVCC timestamps (e.g. lock keys).
-
- if len(seekSuffix) == 0 {
- // The search key has no suffix, so it's the smallest possible key with its
- // prefix. Return the row. This is a common case where the user is seeking
- // to the most-recent row and just wants the smallest key with the prefix.
- return index
- }
-
+ // The search key's prefix exactly matches the prefix of the row at index.
const withWall = 9
const withLogical = withWall + 4
const withSynthetic = withLogical + 1
-
- // If suffixTypes == hasMVCCSuffixes, all keys in the block have MVCC
- // suffixes. Note that blocks that contain both MVCC and non-MVCC should be
- // very rare, so it's ok to use the more general path below in that case.
- if ks.suffixTypes == hasMVCCSuffixes && (len(seekSuffix) == withWall || len(seekSuffix) == withLogical || len(seekSuffix) == withSynthetic) {
- // Fast path: seeking among MVCC versions using a MVCC timestamp.
- seekWallTime := binary.BigEndian.Uint64(seekSuffix)
- var seekLogicalTime uint32
- if len(seekSuffix) >= withLogical {
- seekLogicalTime = binary.BigEndian.Uint32(seekSuffix[8:])
- }
-
- // First check the suffix at index, because querying for the latest value is
- // the most common case.
- if latestWallTime := ks.mvccWallTimes.At(index); latestWallTime < seekWallTime ||
- (latestWallTime == seekWallTime && uint32(ks.mvccLogical.At(index)) <= seekLogicalTime) {
- return index
- }
-
- // Binary search between [index+1, prefixChanged.SeekSetBitGE(index+1)].
+ var seekWallTime uint64
+ var seekLogicalTime uint32
+ switch len(seekSuffix) {
+ case 0:
+ // The search key has no suffix, so it's the smallest possible key with
+ // its prefix. Return the row. This is a common case where the user is
+ // seeking to the most-recent row and just wants the smallest key with
+ // the prefix.
+ return index
+ case withLogical, withSynthetic:
+ seekWallTime = binary.BigEndian.Uint64(seekSuffix)
+ seekLogicalTime = binary.BigEndian.Uint32(seekSuffix[8:])
+ case withWall:
+ seekWallTime = binary.BigEndian.Uint64(seekSuffix)
+ default:
+ // The suffix is untyped. Compare the untyped suffixes.
+ // Binary search between [index, prefixChanged.SeekSetBitGE(index+1)].
//
// Define f(i) = true iff key at i is >= seek key.
// Invariant: f(l-1) == false, f(u) == true.
- l := index + 1
+ l := index
u := ks.roachKeyChanged.SeekSetBitGE(index + 1)
-
for l < u {
- m := int(uint(l+u) >> 1) // avoid overflow when computing m
- // l ≤ m < u
- mWallTime := ks.mvccWallTimes.At(m)
- if mWallTime < seekWallTime ||
- (mWallTime == seekWallTime && uint32(ks.mvccLogical.At(m)) <= seekLogicalTime) {
- u = m // preserves f(u) = true
+ h := int(uint(l+u) >> 1) // avoid overflow when computing h
+ // l ≤ h < u
+ if bytes.Compare(ks.untypedVersions.At(h), seekSuffix) >= 0 {
+ u = h // preserves f(u) == true
} else {
- l = m + 1 // preserves f(l-1) = false
+ l = h + 1 // preserves f(l-1) == false
}
}
return l
}
+ // Seeking among MVCC versions using an MVCC timestamp.
- // Remove the terminator byte, which we know is equal to len(seekSuffix)
- // because we obtained the suffix by splitting the seek key.
- version := seekSuffix[:len(seekSuffix)-1]
- if buildutil.CrdbTestBuild && seekSuffix[len(version)] != byte(len(seekSuffix)) {
- panic(errors.AssertionFailedf("invalid seek suffix %x", seekSuffix))
+ // TODO(jackson): What if the row has an untyped suffix?
+
+ // First check the suffix at index, because querying for the latest value is
+ // the most common case.
+ if latestWallTime := ks.mvccWallTimes.At(index); latestWallTime < seekWallTime ||
+ (latestWallTime == seekWallTime && uint32(ks.mvccLogical.At(index)) <= seekLogicalTime) {
+ return index
}
- // Binary search for version between [index, prefixChanged.SeekSetBitGE(index+1)].
+ // Binary search between [index+1, prefixChanged.SeekSetBitGE(index+1)].
//
- // Define f(i) = true iff key at i is >= seek key (i.e. suffix at i is <= seek suffix).
+ // Define f(i) = true iff key at i is >= seek key.
// Invariant: f(l-1) == false, f(u) == true.
- l := index
+ l := index + 1
u := ks.roachKeyChanged.SeekSetBitGE(index + 1)
- if ks.suffixTypes&hasEmptySuffixes != 0 {
- // Check if the key at index has an empty suffix. Since empty suffixes sort
- // first, this is the only key in the range [index, u) which could have an
- // empty suffix.
- if len(ks.untypedVersions.At(index)) == 0 && ks.mvccWallTimes.At(index) == 0 && ks.mvccLogical.At(index) == 0 {
- // Our seek suffix is not empty, so it must come after the empty suffix.
- l = index + 1
- }
- }
-
for l < u {
- m := int(uint(l+u) >> 1) // avoid overflow when computing m
- // l ≤ m < u
- mVer := ks.untypedVersions.At(m)
- if len(mVer) == 0 {
- wallTime := ks.mvccWallTimes.At(m)
- logicalTime := uint32(ks.mvccLogical.At(m))
- if buildutil.CrdbTestBuild && wallTime == 0 && logicalTime == 0 {
- // This can only happen for row at index.
- panic(errors.AssertionFailedf("unexpected empty suffix at %d (l=%d, u=%d)", m, l, u))
- }
-
- // Note: this path is not very performance sensitive: blocks that mix MVCC
- // suffixes with non-MVCC suffixes should be rare.
-
- //gcassert:noescape
- var buf [12]byte
- //gcassert:inline
- binary.BigEndian.PutUint64(buf[:], wallTime)
- if logicalTime == 0 {
- mVer = buf[:8]
- } else {
- //gcassert:inline
- binary.BigEndian.PutUint32(buf[8:], logicalTime)
- mVer = buf[:12]
- }
- }
- if bytes.Compare(mVer, version) <= 0 {
- u = m // preserves f(u) == true
+ h := int(uint(l+u) >> 1) // avoid overflow when computing h
+ // l ≤ h < u
+ hWallTime := ks.mvccWallTimes.At(h)
+ if hWallTime < seekWallTime ||
+ (hWallTime == seekWallTime && uint32(ks.mvccLogical.At(h)) <= seekLogicalTime) {
+ u = h // preserves f(u) == true
} else {
- l = m + 1 // preserves f(l-1) == false
+ l = h + 1 // preserves f(l-1) == false
}
}
return l
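
Both binary-search loops in seekGEOnSuffix follow the same invariant discipline spelled out in the comments. A standalone sketch of the pattern (a hypothetical helper, not part of this patch), including the overflow-safe midpoint:

package main

import "fmt"

// searchGE returns the smallest i in [l, u) for which pred(i) is true,
// or u if there is none. The loop maintains the invariant from the
// comments above: pred(l-1) == false and pred(u) == true, treating the
// out-of-range boundaries as virtual sentinels.
func searchGE(l, u int, pred func(int) bool) int {
	for l < u {
		h := int(uint(l+u) >> 1) // midpoint; uint arithmetic avoids overflow
		if pred(h) {
			u = h // preserves pred(u) == true
		} else {
			l = h + 1 // preserves pred(l-1) == false
		}
	}
	return l
}

func main() {
	xs := []int{2, 4, 4, 8}
	// First index whose element is >= 4:
	fmt.Println(searchGE(0, len(xs), func(i int) bool { return xs[i] >= 4 })) // 1
}
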
@@ -487,9 +391,6 @@ func (ks *cockroachKeySeeker) seekGEOnSuffix(index int, seekSuffix []byte) (row
func (ks *cockroachKeySeeker) MaterializeUserKey(
ki *colblk.PrefixBytesIter, prevRow, row int,
) []byte {
- if buildutil.CrdbTestBuild && (row < 0 || row >= ks.roachKeys.Rows()) {
- panic(errors.AssertionFailedf("invalid row number %d", row))
- }
if prevRow+1 == row && prevRow >= 0 {
ks.roachKeys.SetNext(ki)
} else {
@@ -509,14 +410,13 @@ func (ks *cockroachKeySeeker) MaterializeUserKey(
return res
}
// Slice first, to check that the capacity is sufficient.
- res := ki.Buf[:roachKeyLen+2+len(untypedVersion)]
+ res := ki.Buf[:roachKeyLen+1+len(untypedVersion)]
*(*byte)(ptr) = 0
memmove(
unsafe.Pointer(uintptr(ptr)+1),
unsafe.Pointer(unsafe.SliceData(untypedVersion)),
uintptr(len(untypedVersion)),
)
- *(*byte)(unsafe.Pointer(uintptr(ptr) + uintptr(len(untypedVersion)+1))) = byte(len(untypedVersion) + 1)
return res
}
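
Context for the WriteKey switch and the simplified MaterializeKey: an engine key's suffix ends in a length byte, and 9/13/14-byte suffixes are MVCC timestamps (wall time, wall plus logical, wall plus logical plus the legacy synthetic bit). A hedged sketch of that decoding, standalone rather than the real column-writer logic:

package main

import (
	"encoding/binary"
	"fmt"
)

// decodeVersion interprets an engine-key suffix (everything after the
// roach key and its 0x00 sentinel, including the trailing length byte).
// Lengths mirror the WriteKey switch: 9 = wall time, 13 = wall + logical,
// 14 = wall + logical + legacy synthetic bit. Anything else is untyped
// and, after this patch, is stored verbatim with its length byte.
func decodeVersion(suffix []byte) (wall uint64, logical uint32, untyped []byte) {
	switch len(suffix) {
	case 0:
		// Bare roach key, no version.
	case 9:
		wall = binary.BigEndian.Uint64(suffix[:8])
	case 13, 14:
		wall = binary.BigEndian.Uint64(suffix[:8])
		logical = binary.BigEndian.Uint32(suffix[8:12])
	default:
		untyped = suffix
	}
	return wall, logical, untyped
}

func main() {
	s := make([]byte, 13)
	binary.BigEndian.PutUint64(s[:8], 1000000000)
	binary.BigEndian.PutUint32(s[8:12], 3)
	s[12] = 13 // length byte equals len(suffix)
	fmt.Println(decodeVersion(s)) // 1000000000 3 []
}
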
diff --git a/pkg/storage/pebble_key_schema_test.go b/pkg/storage/pebble_key_schema_test.go
index 32b1af7e585f..eebec84965a3 100644
--- a/pkg/storage/pebble_key_schema_test.go
+++ b/pkg/storage/pebble_key_schema_test.go
@@ -10,7 +10,6 @@ import (
"encoding/hex"
"fmt"
"math/rand"
- "slices"
"strconv"
"strings"
"testing"
@@ -21,7 +20,6 @@ import (
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
- "github.com/cockroachdb/cockroach/pkg/util/randutil"
"github.com/cockroachdb/cockroach/pkg/util/uuid"
"github.com/cockroachdb/crlib/crbytes"
"github.com/cockroachdb/crlib/crstrings"
@@ -31,7 +29,6 @@ import (
"github.com/cockroachdb/pebble/sstable/block"
"github.com/cockroachdb/pebble/sstable/colblk"
"github.com/olekukonko/tablewriter"
- "github.com/stretchr/testify/require"
)
func TestKeySchema_KeyWriter(t *testing.T) {
@@ -310,48 +307,3 @@ func parseTestKey(s string) ([]byte, error) {
}), nil
}
}
-
-func TestKeySchema_RandomKeys(t *testing.T) {
- defer leaktest.AfterTest(t)()
- defer log.Scope(t).Close(t)
-
- rng, _ := randutil.NewTestRand()
- maxUserKeyLen := randutil.RandIntInRange(rng, 2, 10)
- keys := make([][]byte, randutil.RandIntInRange(rng, 1, 1000))
- for i := range keys {
- keys[i] = randomSerializedEngineKey(rng, maxUserKeyLen)
- }
- slices.SortFunc(keys, EngineKeyCompare)
-
- var enc colblk.DataBlockEncoder
- enc.Init(&keySchema)
- for i := range keys {
- ikey := pebble.InternalKey{
- UserKey: keys[i],
- Trailer: pebble.MakeInternalKeyTrailer(0, pebble.InternalKeyKindSet),
- }
- enc.Add(ikey, keys[i], block.InPlaceValuePrefix(false), enc.KeyWriter.ComparePrev(keys[i]), false /* isObsolete */)
- }
- blk, _ := enc.Finish(len(keys), enc.Size())
- blk = crbytes.CopyAligned(blk)
-
- var dec colblk.DataBlockDecoder
- dec.Init(&keySchema, blk)
- var it colblk.DataBlockIter
- it.InitOnce(&keySchema, EngineKeyCompare, EngineKeySplit, nil)
- require.NoError(t, it.Init(&dec, block.NoTransforms))
- for k, kv := 0, it.First(); kv != nil; k, kv = k+1, it.Next() {
- require.True(t, EngineKeyEqual(keys[k], kv.K.UserKey))
- require.Zero(t, EngineKeyCompare(keys[k], kv.K.UserKey))
- // Note we allow the key read from the block to be physically different,
- // because the above randomization generates point keys with the
- // synthetic bit encoding. However the materialized key should not be
- // longer than the original key, because we depend on the max key length
- // during writing bounding the key length during reading.
- if n := len(kv.K.UserKey); n > len(keys[k]) {
- t.Fatalf("key %q is longer than original key %q", kv.K.UserKey, keys[k])
- }
- checkEngineKey(kv.K.UserKey)
- }
- require.NoError(t, it.Close())
-}
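
The deleted TestKeySchema_RandomKeys exercised a standard encode/decode round-trip property: random sorted keys in, comparator-equal keys out. A generic sketch of that shape, with encode, decode, and cmp as stand-ins for the real colblk encoder, decoder, and EngineKeyCompare:

package roundtrip

import (
	"math/rand"
	"sort"
	"testing"
)

// checkRoundTrip encodes a sorted batch of random keys and verifies that
// decoding yields keys equal under cmp. Physical bytes may differ when
// the encoding is not canonical, which is why cmp is used rather than
// bytes.Equal, mirroring the deleted test's use of EngineKeyCompare.
func checkRoundTrip(
	t *testing.T,
	encode func([][]byte) []byte,
	decode func([]byte) [][]byte,
	cmp func(a, b []byte) int,
) {
	rng := rand.New(rand.NewSource(1))
	keys := make([][]byte, 1+rng.Intn(1000))
	for i := range keys {
		keys[i] = make([]byte, 1+rng.Intn(8))
		rng.Read(keys[i])
	}
	sort.Slice(keys, func(i, j int) bool { return cmp(keys[i], keys[j]) < 0 })

	decoded := decode(encode(keys))
	if len(decoded) != len(keys) {
		t.Fatalf("decoded %d keys, want %d", len(decoded), len(keys))
	}
	for i := range keys {
		if cmp(decoded[i], keys[i]) != 0 {
			t.Fatalf("key %d: got %x, want %x", i, decoded[i], keys[i])
		}
	}
}
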
diff --git a/pkg/storage/temp_engine.go b/pkg/storage/temp_engine.go
index 03f02b9d1f00..ec95751b4a1f 100644
--- a/pkg/storage/temp_engine.go
+++ b/pkg/storage/temp_engine.go
@@ -106,6 +106,8 @@ func newPebbleTempEngine(
// pebbleMap.makeKey and pebbleMap.makeKeyWithSequence on how this works.
// Use the default bytes.Compare-like comparer.
cfg.opts.Comparer = pebble.DefaultComparer
+ cfg.opts.KeySchemas = nil
+ cfg.opts.KeySchema = ""
cfg.opts.DisableWAL = true
cfg.opts.Experimental.KeyValidationFunc = nil
cfg.opts.BlockPropertyCollectors = nil
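
Clearing the key schema here follows from the comparer swap: the CockroachDB key schema assumes MVCC-encoded engine keys ordered prefix-ascending, version-descending, while the temp engine stores opaque keys under plain bytes.Compare. A toy contrast of the two orderings (simplified; real engine keys also carry sentinel and length bytes):

package main

import (
	"bytes"
	"fmt"
)

// compareSimplified orders by prefix ascending, then version descending,
// which is the essence of MVCC key ordering: the newest version sorts
// first. The real EngineKeyCompare additionally handles the sentinel and
// length bytes.
func compareSimplified(aPrefix, aVer, bPrefix, bVer []byte) int {
	if c := bytes.Compare(aPrefix, bPrefix); c != 0 {
		return c
	}
	return -bytes.Compare(aVer, bVer) // descending on version
}

func main() {
	// Same prefix "k", versions 1 and 2: MVCC order puts 2 (newer) first.
	fmt.Println(compareSimplified([]byte("k"), []byte{2}, []byte("k"), []byte{1})) // -1
	// Plain byte ordering would put version 1 first instead.
	fmt.Println(bytes.Compare(append([]byte("k"), 1), append([]byte("k"), 2))) // -1
}
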
diff --git a/pkg/storage/testdata/key_schema_key_seeker b/pkg/storage/testdata/key_schema_key_seeker
index 09ffe8fbc18c..039eb4557ab0 100644
--- a/pkg/storage/testdata/key_schema_key_seeker
+++ b/pkg/storage/testdata/key_schema_key_seeker
@@ -147,25 +147,3 @@ MaterializeUserKey(-1, 3) = hex:6d6f6f0000000000b2d05e00000000010d
MaterializeUserKey(3, 2) = hex:666f6f0000000000b2d05e00000000010d
MaterializeUserKey(2, 0) = hex:6261720000000000b2d05e00000000010d
MaterializeUserKey(0, 1) = hex:6261780000000000b2d05e00000000010d
-
-define-block
-moo@3.000000001,0
-moo@3.000000000,2
-moo@3.000000000,1
-moo@3.000000000,0
-----
-Parse("moo@3.000000001,0") = hex:6d6f6f0000000000b2d05e0109
-Parse("moo@3.000000000,2") = hex:6d6f6f0000000000b2d05e00000000020d
-Parse("moo@3.000000000,1") = hex:6d6f6f0000000000b2d05e00000000010d
-Parse("moo@3.000000000,0") = hex:6d6f6f0000000000b2d05e0009
-
-materialize-user-key
-0
-1
-2
-3
-----
-MaterializeUserKey(-1, 0) = hex:6d6f6f0000000000b2d05e0109
-MaterializeUserKey(0, 1) = hex:6d6f6f0000000000b2d05e00000000020d
-MaterializeUserKey(1, 2) = hex:6d6f6f0000000000b2d05e00000000010d
-MaterializeUserKey(2, 3) = hex:6d6f6f0000000000b2d05e0009
diff --git a/pkg/storage/testdata/key_schema_key_writer b/pkg/storage/testdata/key_schema_key_writer
index 82a23e9f706f..535c20c771a5 100644
--- a/pkg/storage/testdata/key_schema_key_writer
+++ b/pkg/storage/testdata/key_schema_key_writer
@@ -98,10 +98,10 @@ Parse("/MVCC/poi@1.000000000,3") = hex:2f4d5643432f706f6900000000003b9aca0000000
finish
----
-+------------------------+------------+---------+------------------------------------+
-| KEY | WALL | LOGICAL | UNTYPED |
-+------------------------+------------+---------+------------------------------------+
-| hex:017a6b12706f690001 | 0 | 0 | 022a84b329b76b4616ac151047f0a3fe9c |
-| hex:017a6b12706f690001 | 0 | 0 | 02073a83c45688420eaf97824255790f1e |
-| /MVCC/poi | 1000000000 | 3 | |
-+------------------------+------------+---------+------------------------------------+
++------------------------+------------+---------+--------------------------------------+
+| KEY | WALL | LOGICAL | UNTYPED |
++------------------------+------------+---------+--------------------------------------+
+| hex:017a6b12706f690001 | 0 | 0 | 022a84b329b76b4616ac151047f0a3fe9c12 |
+| hex:017a6b12706f690001 | 0 | 0 | 02073a83c45688420eaf97824255790f1e12 |
+| /MVCC/poi | 1000000000 | 3 | |
++------------------------+------------+---------+--------------------------------------+
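
The only change in this expectation is that the untyped column now retains the engine key's trailing length byte. For these lock-table versions that byte is 0x12: a one-byte tag (02 here) plus a 16-byte UUID gives len(version)+1 = 18. The arithmetic:

package main

import "fmt"

func main() {
	version := 1 + 16                // tag byte + 16-byte UUID
	fmt.Printf("%#x\n", version+1) // 0x12, the trailing length byte
}
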
diff --git a/pkg/testutils/lint/gcassert_paths.txt b/pkg/testutils/lint/gcassert_paths.txt
index 92d15459b76f..c81c2780f7c6 100644
--- a/pkg/testutils/lint/gcassert_paths.txt
+++ b/pkg/testutils/lint/gcassert_paths.txt
@@ -34,4 +34,3 @@ util/admission
util/hlc
util/intsets
util/mon
-util/vector
diff --git a/pkg/testutils/release/cockroach_releases.yaml b/pkg/testutils/release/cockroach_releases.yaml
index 4054457ed7f7..233de7383a90 100644
--- a/pkg/testutils/release/cockroach_releases.yaml
+++ b/pkg/testutils/release/cockroach_releases.yaml
@@ -19,7 +19,7 @@
- 23.1.0
predecessor: "22.2"
"23.2":
- latest: 23.2.13
+ latest: 23.2.14
predecessor: "23.1"
"24.1":
latest: 24.1.6
diff --git a/pkg/testutils/serverutils/api.go b/pkg/testutils/serverutils/api.go
index bfbaa2b178a9..63652aff61ee 100644
--- a/pkg/testutils/serverutils/api.go
+++ b/pkg/testutils/serverutils/api.go
@@ -637,10 +637,6 @@ type StorageLayerInterface interface {
// The return value is of type *kvserver.RaftTransport.
RaftTransport() interface{}
- // StoreLivenessTransport provides access to the store liveness transport.
- // The return value is of type *storeliveness.Transport.
- StoreLivenessTransport() interface{}
-
// GetRangeLease returns information on the lease for the range
// containing key, and a timestamp taken from the node. The lease is
// returned regardless of its status.
diff --git a/pkg/testutils/serverutils/test_cluster_shim.go b/pkg/testutils/serverutils/test_cluster_shim.go
index 07ebfd427904..8f125b832ce8 100644
--- a/pkg/testutils/serverutils/test_cluster_shim.go
+++ b/pkg/testutils/serverutils/test_cluster_shim.go
@@ -43,6 +43,9 @@ type TestClusterInterface interface {
// ServerConn returns a gosql.DB connection to a specific node.
ServerConn(idx int) *gosql.DB
+ // Restart stops and then starts all servers in the cluster.
+ Restart() error
+
// StopServer stops a single server.
StopServer(idx int)
diff --git a/pkg/testutils/sqlutils/sql_runner.go b/pkg/testutils/sqlutils/sql_runner.go
index b24a03aacd1e..ce2959549f81 100644
--- a/pkg/testutils/sqlutils/sql_runner.go
+++ b/pkg/testutils/sqlutils/sql_runner.go
@@ -26,7 +26,7 @@ import (
// convenience functions to run SQL statements and fail the test on any errors.
type SQLRunner struct {
DB DBHandle
- SucceedsSoonDuration time.Duration // defaults to testutils.DefaultSucceedsSoonDuration
+ SucceedsSoonDuration time.Duration // defaults to testutils.DefaultSucceedsSoonDuration, or testutils.RaceSucceedsSoonDuration under race
MaxTxnRetries int // defaults to 0 for unlimited retries
}
@@ -124,7 +124,7 @@ func (sr *SQLRunner) succeedsWithin(t Fataler, f func() error) {
helperOrNoop(t)()
d := sr.SucceedsSoonDuration
if d == 0 {
- d = testutils.DefaultSucceedsSoonDuration
+ d = testutils.SucceedsSoonDuration()
}
require.NoError(requireT{t}, testutils.SucceedsWithinError(f, d))
}
@@ -230,7 +230,7 @@ func (sr *SQLRunner) ExpectErrWithTimeout(
helperOrNoop(t)()
d := sr.SucceedsSoonDuration
if d == 0 {
- d = testutils.DefaultSucceedsSoonDuration
+ d = testutils.SucceedsSoonDuration()
}
err := timeutil.RunWithTimeout(context.Background(), "expect-err", d, func(ctx context.Context) error {
_, err := sr.DB.ExecContext(ctx, query, args...)
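
Both hunks swap a fixed default for a function call so the fallback can depend on the build, per the updated field comment above. A sketch of what such a helper might look like (the race-flag wiring and the race value are assumptions; 45s is the long-standing non-race default):

package testutils

import "time"

// raceEnabled would normally come from a build-tagged file
// (//go:build race); hardcoded here for illustration only.
const raceEnabled = false

const (
	// DefaultSucceedsSoonDuration is the non-race retry deadline.
	DefaultSucceedsSoonDuration = 45 * time.Second
	// RaceSucceedsSoonDuration stretches the deadline under the race
	// detector; the exact value here is illustrative.
	RaceSucceedsSoonDuration = 3 * time.Minute
)

// SucceedsSoonDuration picks the default retry deadline for the current
// build, which is why callers invoke a function instead of reading a
// constant.
func SucceedsSoonDuration() time.Duration {
	if raceEnabled {
		return RaceSucceedsSoonDuration
	}
	return DefaultSucceedsSoonDuration
}
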
diff --git a/pkg/testutils/testcluster/testcluster.go b/pkg/testutils/testcluster/testcluster.go
index e705e463d366..cb88ed0a88ba 100644
--- a/pkg/testutils/testcluster/testcluster.go
+++ b/pkg/testutils/testcluster/testcluster.go
@@ -1126,7 +1126,7 @@ func (tc *TestCluster) MaybeWaitForLeaseUpgrade(
}
// WaitForLeaseUpgrade waits until the lease held for the given range descriptor
-// is upgraded to either a leader-lease or an epoch-based lease.
+// is upgraded to an epoch-based one.
func (tc *TestCluster) WaitForLeaseUpgrade(
ctx context.Context, t serverutils.TestFataler, desc roachpb.RangeDescriptor,
) roachpb.Lease {
@@ -1140,7 +1140,7 @@ func (tc *TestCluster) WaitForLeaseUpgrade(
if l.Type() == roachpb.LeaseExpiration {
return errors.Errorf("lease still an expiration based lease")
}
- t.Logf("lease is now of type: %s", l.Type())
+ require.Equal(t, int64(1), l.Epoch)
return nil
})
return l
diff --git a/pkg/ui/pnpm-lock.yaml b/pkg/ui/pnpm-lock.yaml
index 699721d95f9f..85a6d1277a36 100644
--- a/pkg/ui/pnpm-lock.yaml
+++ b/pkg/ui/pnpm-lock.yaml
@@ -55,6 +55,9 @@ importers:
'@babel/runtime':
specifier: ^7.12.13
version: 7.12.13
+ '@cockroachlabs/crdb-protobuf-client':
+ specifier: workspace:../db-console/src/js
+ version: link:../db-console/src/js
'@cockroachlabs/design-tokens':
specifier: 0.4.5
version: 0.4.5
@@ -224,9 +227,6 @@ importers:
'@bazel/worker':
specifier: 5.5.0
version: 5.5.0
- '@cockroachlabs/crdb-protobuf-client':
- specifier: workspace:../db-console/src/js
- version: link:../db-console/src/js
'@cockroachlabs/eslint-config':
specifier: 1.0.7
version: 1.0.7(@typescript-eslint/eslint-plugin@5.62.0)(eslint-plugin-prettier@5.2.1)(eslint-plugin-react-hooks@4.6.0)(eslint-plugin-react@7.34.1)(eslint@8.57.0)(typescript@5.1.6)
diff --git a/pkg/ui/workspaces/cluster-ui/package.json b/pkg/ui/workspaces/cluster-ui/package.json
index 2511d68a3e93..db6b1f69854b 100644
--- a/pkg/ui/workspaces/cluster-ui/package.json
+++ b/pkg/ui/workspaces/cluster-ui/package.json
@@ -1,6 +1,6 @@
{
"name": "@cockroachlabs/cluster-ui",
- "version": "24.3.0-prerelease.4",
+ "version": "24.3.0-prerelease.3",
"description": "Cluster UI is a library of large features shared between CockroachDB and CockroachCloud",
"repository": {
"type": "git",
@@ -33,6 +33,7 @@
"dependencies": {
"@ant-design/icons": "^5.3.6",
"@babel/runtime": "^7.12.13",
+ "@cockroachlabs/crdb-protobuf-client": "workspace:../db-console/src/js",
"@cockroachlabs/design-tokens": "0.4.5",
"@cockroachlabs/icons": "0.5.2",
"@cockroachlabs/ui-components": "0.4.3",
@@ -89,7 +90,6 @@
"@babel/preset-typescript": "^7.8.0",
"@bazel/typescript": "5.5.0",
"@bazel/worker": "5.5.0",
- "@cockroachlabs/crdb-protobuf-client": "workspace:../db-console/src/js",
"@cockroachlabs/eslint-config": "1.0.7",
"@cockroachlabs/eslint-plugin-crdb": "workspace:../eslint-plugin-crdb",
"@storybook/addon-actions": "^6.5.16",
diff --git a/pkg/ui/workspaces/cluster-ui/src/api/nodesApi.ts b/pkg/ui/workspaces/cluster-ui/src/api/nodesApi.ts
index efafac4d729b..e1831a077a58 100644
--- a/pkg/ui/workspaces/cluster-ui/src/api/nodesApi.ts
+++ b/pkg/ui/workspaces/cluster-ui/src/api/nodesApi.ts
@@ -20,6 +20,11 @@ export const getNodes =
return fetchData(cockroach.server.serverpb.NodesResponse, NODES_PATH);
};
+export type NodeStatus = {
+ region: string;
+ stores: StoreID[];
+};
+
export const useNodeStatuses = () => {
const clusterDetails = useContext(ClusterDetailsContext);
const isTenant = clusterDetails.isTenant;
@@ -32,29 +37,31 @@ export const useNodeStatuses = () => {
},
);
- const { storeIDToNodeID, nodeIDToRegion } = useMemo(() => {
- const nodeIDToRegion: Record<NodeID, string> = {};
+ const { storeIDToNodeID, nodeStatusByID } = useMemo(() => {
+ const nodeStatusByID: Record<NodeID, NodeStatus> = {};
const storeIDToNodeID: Record<StoreID, NodeID> = {};
if (!data) {
- return { nodeIDToRegion, storeIDToNodeID };
+ return { nodeStatusByID, storeIDToNodeID };
}
- data.nodes.forEach(ns => {
- ns.store_statuses.forEach(store => {
+ data.nodes?.forEach(ns => {
+ ns.store_statuses?.forEach(store => {
storeIDToNodeID[store.desc.store_id as StoreID] = ns.desc
.node_id as NodeID;
});
- nodeIDToRegion[ns.desc.node_id as NodeID] = getRegionFromLocality(
- ns.desc.locality,
- );
+
+ nodeStatusByID[ns.desc.node_id as NodeID] = {
+ region: getRegionFromLocality(ns.desc.locality),
+ stores: ns.store_statuses?.map(s => s.desc.store_id as StoreID),
+ };
});
- return { nodeIDToRegion, storeIDToNodeID };
+
+ return { nodeStatusByID, storeIDToNodeID };
}, [data]);
return {
- data,
isLoading,
error,
- nodeIDToRegion,
+ nodeStatusByID,
storeIDToNodeID,
};
};
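
The memo builds two lookup tables in one pass over the node list: store ID to node ID, and node ID to {region, stores}. The same indexing step in Go, as a sketch with simplified types:

package main

import "fmt"

type (
	NodeID  int
	StoreID int
)

type NodeStatus struct {
	Region string
	Stores []StoreID
}

type node struct {
	id     NodeID
	region string
	stores []StoreID
}

// buildIndexes mirrors the useMemo body: one pass that fills both maps.
func buildIndexes(nodes []node) (map[StoreID]NodeID, map[NodeID]NodeStatus) {
	storeToNode := map[StoreID]NodeID{}
	statusByID := map[NodeID]NodeStatus{}
	for _, n := range nodes {
		for _, s := range n.stores {
			storeToNode[s] = n.id
		}
		statusByID[n.id] = NodeStatus{Region: n.region, Stores: n.stores}
	}
	return storeToNode, statusByID
}

func main() {
	s2n, status := buildIndexes([]node{
		{id: 1, region: "us-east", stores: []StoreID{101, 102}},
		{id: 2, region: "us-west", stores: []StoreID{201}},
	})
	fmt.Println(s2n[201], status[1].Region) // 2 us-east
}
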
diff --git a/pkg/ui/workspaces/cluster-ui/src/components/nodeRegionsSelector/nodeRegionsSelector.spec.tsx b/pkg/ui/workspaces/cluster-ui/src/components/nodeRegionsSelector/nodeRegionsSelector.spec.tsx
index 51046fe0d06d..33242cce3434 100644
--- a/pkg/ui/workspaces/cluster-ui/src/components/nodeRegionsSelector/nodeRegionsSelector.spec.tsx
+++ b/pkg/ui/workspaces/cluster-ui/src/components/nodeRegionsSelector/nodeRegionsSelector.spec.tsx
@@ -3,54 +3,54 @@
// Use of this software is governed by the CockroachDB Software License
// included in the /LICENSE file.
+import { cockroach } from "@cockroachlabs/crdb-protobuf-client";
import { render, screen, fireEvent, waitFor } from "@testing-library/react";
import React from "react";
-import { useNodeStatuses } from "src/api";
-import { getRegionFromLocality } from "src/store/nodes";
-import { StoreID } from "src/types/clusterTypes";
+import * as api from "src/api/nodesApi";
+import { NodeID, StoreID } from "src/types/clusterTypes";
import { NodeRegionsSelector } from "./nodeRegionsSelector";
-// Mock the useNodeStatuses hook
-jest.mock("src/api", () => ({
- useNodeStatuses: jest.fn(),
-}));
+import NodesResponse = cockroach.server.serverpb.NodesResponse;
-// Mock the getRegionFromLocality function
-jest.mock("src/store/nodes", () => ({
- getRegionFromLocality: jest.fn(),
-}));
-
-const mockNodeData = {
+const mockNodeData = new NodesResponse({
nodes: [
{
- desc: { node_id: 1, locality: { region: "us-east" } },
+ desc: {
+ node_id: 1,
+ locality: { tiers: [{ key: "region", value: "us-east" }] },
+ },
store_statuses: [
{ desc: { store_id: 101 } },
{ desc: { store_id: 102 } },
],
},
{
- desc: { node_id: 2, locality: { region: "us-west" } },
+ desc: {
+ node_id: 2,
+ locality: { tiers: [{ key: "region", value: "us-west" }] },
+ },
store_statuses: [{ desc: { store_id: 201 } }],
},
{
- desc: { node_id: 3, locality: { region: "us-east" } },
+ desc: {
+ node_id: 3,
+ locality: { tiers: [{ key: "region", value: "us-east" }] },
+ },
store_statuses: [{ desc: { store_id: 301 } }],
},
],
-};
+});
describe("NodeRegionsSelector", () => {
beforeEach(() => {
- (useNodeStatuses as jest.Mock).mockReturnValue({
- isLoading: false,
- data: mockNodeData,
- });
- (getRegionFromLocality as jest.Mock).mockImplementation(
- locality => locality.region,
- );
+ // Mock the api.getNodes function at the module level
+ jest.spyOn(api, "getNodes").mockResolvedValue(mockNodeData);
+ });
+
+ afterEach(() => {
+ jest.clearAllMocks();
});
it("should render", () => {
@@ -103,9 +103,20 @@ describe("NodeRegionsSelector", () => {
});
it("handles loading state", () => {
- (useNodeStatuses as jest.Mock).mockReturnValue({
+ jest.spyOn(api, "useNodeStatuses").mockReturnValue({
+ error: null,
isLoading: true,
- data: mockNodeData,
+ nodeStatusByID: {
+ 1: { region: "us-east", stores: [101, 102] },
+ 2: { region: "us-west", stores: [201] },
+ 3: { region: "us-east", stores: [301] },
+ } as Record<NodeID, NodeStatus>,
+ storeIDToNodeID: {
+ 101: 1,
+ 102: 1,
+ 201: 2,
+ 301: 3,
+ } as Record<StoreID, NodeID>,
});
render(<NodeRegionsSelector value={[]} onChange={() => {}} />);
diff --git a/pkg/ui/workspaces/cluster-ui/src/components/nodeRegionsSelector/nodeRegionsSelector.tsx b/pkg/ui/workspaces/cluster-ui/src/components/nodeRegionsSelector/nodeRegionsSelector.tsx
index da8ab64d0a9d..93bad55f6cb1 100644
--- a/pkg/ui/workspaces/cluster-ui/src/components/nodeRegionsSelector/nodeRegionsSelector.tsx
+++ b/pkg/ui/workspaces/cluster-ui/src/components/nodeRegionsSelector/nodeRegionsSelector.tsx
@@ -7,7 +7,6 @@ import React, { useMemo } from "react";
import Select, { OptionsType } from "react-select";
import { useNodeStatuses } from "src/api";
-import { getRegionFromLocality } from "src/store/nodes";
import { NodeID, StoreID } from "src/types/clusterTypes";
import {
GroupedReactSelectOption,
@@ -23,22 +22,24 @@ export const NodeRegionsSelector: React.FC = ({
value,
onChange,
}) => {
- const nodesResp = useNodeStatuses();
+ const { nodeStatusByID, isLoading } = useNodeStatuses();
const nodeOptions: GroupedReactSelectOption[] = useMemo(() => {
const optionsMap: Record = {};
- if (nodesResp.isLoading && !nodesResp.data?.nodes) {
+ const nodes = Object.keys(nodeStatusByID ?? {});
+ if (isLoading && !nodes.length) {
return [];
}
- nodesResp.data.nodes.forEach(node => {
- const region = getRegionFromLocality(node.desc.locality);
+ nodes.forEach(node => {
+ const nid = parseInt(node) as NodeID;
+ const region = nodeStatusByID[nid].region;
if (optionsMap[region] == null) {
optionsMap[region] = [];
}
optionsMap[region].push({
- nid: node.desc.node_id as NodeID,
- sids: node.store_statuses.map(s => s.desc.store_id as StoreID),
+ nid,
+ sids: nodeStatusByID[nid].stores,
});
});
@@ -51,7 +52,7 @@ export const NodeRegionsSelector: React.FC = ({
})),
};
});
- }, [nodesResp]);
+ }, [nodeStatusByID, isLoading]);
const onSelectChange = (
selected: OptionsType<ReactSelectOption<NodeID>>,
diff --git a/pkg/ui/workspaces/cluster-ui/src/components/regionNodesLabel/components/nodesList.tsx b/pkg/ui/workspaces/cluster-ui/src/components/regionNodesLabel/components/nodesList.tsx
index 87098a88ef25..d2dc07618dba 100644
--- a/pkg/ui/workspaces/cluster-ui/src/components/regionNodesLabel/components/nodesList.tsx
+++ b/pkg/ui/workspaces/cluster-ui/src/components/regionNodesLabel/components/nodesList.tsx
@@ -16,6 +16,10 @@ type Props = {
};
export const NodesList: React.FC<Props> = ({ nodes = [] }) => {
+ if (!nodes.length) {
+ return null;
+ }
+
const displayedNodes = nodes.slice(0, 4);
const hiddenNodes = nodes.length > 4 ? nodes.slice(4) : [];
return (
diff --git a/pkg/ui/workspaces/cluster-ui/src/components/regionNodesLabel/components/regionLabel.module.scss b/pkg/ui/workspaces/cluster-ui/src/components/regionNodesLabel/components/regionLabel.module.scss
index 4ed6fbd8705d..35f46e44a07c 100644
--- a/pkg/ui/workspaces/cluster-ui/src/components/regionNodesLabel/components/regionLabel.module.scss
+++ b/pkg/ui/workspaces/cluster-ui/src/components/regionNodesLabel/components/regionLabel.module.scss
@@ -17,7 +17,7 @@
.label-body {
background-color: $colors--neutral-3;
- padding: 0.1rem 0.5rem;
+ padding: 4px 8px;
border-radius: 0.375rem;
display: flex;
flex-direction: row;
diff --git a/pkg/ui/workspaces/cluster-ui/src/components/regionNodesLabel/regionNodesLabel.tsx b/pkg/ui/workspaces/cluster-ui/src/components/regionNodesLabel/regionNodesLabel.tsx
index 14647833b108..4e021f430aad 100644
--- a/pkg/ui/workspaces/cluster-ui/src/components/regionNodesLabel/regionNodesLabel.tsx
+++ b/pkg/ui/workspaces/cluster-ui/src/components/regionNodesLabel/regionNodesLabel.tsx
@@ -3,6 +3,7 @@
// Use of this software is governed by the CockroachDB Software License
// included in the /LICENSE file.
+import { Skeleton } from "antd";
import React from "react";
import { NodeID } from "src/types/clusterTypes";
@@ -12,14 +13,23 @@ import { RegionLabel } from "./components/regionLabel";
type RegionNodesLabelProps = {
nodesByRegion: Record<string, NodeID[]>;
+ loading?: boolean;
};
export const RegionNodesLabel: React.FC<RegionNodesLabelProps> = ({
nodesByRegion = {},
+ loading,
}) => {
+ if (loading) {
+ return (
+ <Skeleton />
+ );
+ }
+
if (Object.keys(nodesByRegion).length === 1) {
return ;
}
+
return (
{Object.entries(nodesByRegion).map(([region, nodes]) => (
diff --git a/pkg/ui/workspaces/cluster-ui/src/components/tableMetadataLastUpdated/tableMetadataJobControl.tsx b/pkg/ui/workspaces/cluster-ui/src/components/tableMetadataLastUpdated/tableMetadataJobControl.tsx
index 170e7df71219..68503d7c7526 100644
--- a/pkg/ui/workspaces/cluster-ui/src/components/tableMetadataLastUpdated/tableMetadataJobControl.tsx
+++ b/pkg/ui/workspaces/cluster-ui/src/components/tableMetadataLastUpdated/tableMetadataJobControl.tsx
@@ -4,7 +4,7 @@
// included in the /LICENSE file.
import { LoadingOutlined, RedoOutlined } from "@ant-design/icons";
-import { Skeleton, Spin } from "antd";
+import { Spin } from "antd";
import React, { useCallback, useEffect } from "react";
import {
@@ -91,13 +91,12 @@ export const TableMetadataJobControl: React.FC<
return (
-
-
- {durationText => Last refreshed: {durationText}
}
-
-
+
+ {durationText => <>Last refreshed: {durationText}</>}
+
<>
React.ReactNode;
- errorMessage?: string;
-};
-
const formatErrorMessage = (
errorMessage: string | null,
lastUpdatedTime: moment.Moment | null,
@@ -49,12 +40,35 @@ const formatErrorMessage = (
);
};
+type Props = {
+ timestamp?: moment.Moment | null;
+ children: (
+ formattedRelativeTime: React.ReactNode,
+ icon?: JSX.Element,
+ ) => React.ReactNode;
+ errorMessage?: string;
+ loading?: boolean;
+};
+
export const TableMetadataLastUpdatedTooltip = ({
timestamp,
errorMessage,
children,
+ loading,
}: Props) => {
- const durationText = timestamp?.fromNow() ?? "Never";
+ const duration = (
+
+
+ {timestamp?.fromNow() ?? "Never"}
+
+
+ );
+
const icon = errorMessage ? (
) : (
@@ -83,7 +97,7 @@ export const TableMetadataLastUpdatedTooltip = ({
}
>
- {children(durationText, icon)}
+ {children(duration, icon)}
);
diff --git a/pkg/ui/workspaces/cluster-ui/src/databaseDetailsV2/tablesView.tsx b/pkg/ui/workspaces/cluster-ui/src/databaseDetailsV2/tablesView.tsx
index f727090ddf10..659d3afe57f4 100644
--- a/pkg/ui/workspaces/cluster-ui/src/databaseDetailsV2/tablesView.tsx
+++ b/pkg/ui/workspaces/cluster-ui/src/databaseDetailsV2/tablesView.tsx
@@ -123,7 +123,10 @@ const COLUMNS: (TableColumnProps
& {
hideIfTenant: true,
width: "fit-content",
render: (t: TableRow) => (
-
+
),
},
{
@@ -240,13 +243,13 @@ export const TablesPageV2 = () => {
const tableData = useMemo(
() =>
tableMetadataToRows(tableList ?? [], {
- nodeIDToRegion: nodesResp.nodeIDToRegion,
+ nodeStatusByID: nodesResp.nodeStatusByID,
storeIDToNodeID: nodesResp.storeIDToNodeID,
isLoading: nodesResp.isLoading,
}),
[
tableList,
- nodesResp.nodeIDToRegion,
+ nodesResp.nodeStatusByID,
nodesResp.storeIDToNodeID,
nodesResp.isLoading,
],
diff --git a/pkg/ui/workspaces/cluster-ui/src/databaseDetailsV2/types.ts b/pkg/ui/workspaces/cluster-ui/src/databaseDetailsV2/types.ts
index bc6249fcc86b..0207e37c5637 100644
--- a/pkg/ui/workspaces/cluster-ui/src/databaseDetailsV2/types.ts
+++ b/pkg/ui/workspaces/cluster-ui/src/databaseDetailsV2/types.ts
@@ -8,6 +8,9 @@ import { NodeID } from "src/types/clusterTypes";
export type TableRow = TableMetadata & {
qualifiedNameWithSchema: string;
- nodesByRegion: Record<string, NodeID[]>;
+ nodesByRegion: {
+ isLoading: boolean;
+ data: Record<string, NodeID[]>;
+ };
key: string;
};
diff --git a/pkg/ui/workspaces/cluster-ui/src/databaseDetailsV2/utils.tsx b/pkg/ui/workspaces/cluster-ui/src/databaseDetailsV2/utils.tsx
index 6aa92998738a..53b0de439cbc 100644
--- a/pkg/ui/workspaces/cluster-ui/src/databaseDetailsV2/utils.tsx
+++ b/pkg/ui/workspaces/cluster-ui/src/databaseDetailsV2/utils.tsx
@@ -3,6 +3,7 @@
// Use of this software is governed by the CockroachDB Software License
// included in the /LICENSE file.
+import { NodeStatus } from "src/api";
import { TableMetadata } from "src/api/databases/getTableMetadataApi";
import { NodeID, StoreID } from "src/types/clusterTypes";
import { mapStoreIDsToNodeRegions } from "src/util/nodeUtils";
@@ -12,7 +13,7 @@ import { TableRow } from "./types";
export const tableMetadataToRows = (
tables: TableMetadata[],
nodesInfo: {
- nodeIDToRegion: Record<NodeID, string>;
+ nodeStatusByID: Record<NodeID, NodeStatus>;
storeIDToNodeID: Record<StoreID, NodeID>;
isLoading: boolean;
},
@@ -20,12 +21,15 @@ export const tableMetadataToRows = (
return tables.map(table => {
const nodesByRegion = mapStoreIDsToNodeRegions(
table.storeIds,
- nodesInfo?.nodeIDToRegion,
+ nodesInfo?.nodeStatusByID,
nodesInfo?.storeIDToNodeID,
);
return {
...table,
- nodesByRegion: nodesByRegion,
+ nodesByRegion: {
+ isLoading: nodesInfo.isLoading,
+ data: nodesByRegion,
+ },
key: table.tableId.toString(),
qualifiedNameWithSchema: `${table.schemaName}.${table.tableName}`,
};
diff --git a/pkg/ui/workspaces/cluster-ui/src/databasesV2/index.tsx b/pkg/ui/workspaces/cluster-ui/src/databasesV2/index.tsx
index 417f2d3270e2..7a8dc5bec69d 100644
--- a/pkg/ui/workspaces/cluster-ui/src/databasesV2/index.tsx
+++ b/pkg/ui/workspaces/cluster-ui/src/databasesV2/index.tsx
@@ -3,7 +3,7 @@
// Use of this software is governed by the CockroachDB Software License
// included in the /LICENSE file.
-import { Row, Skeleton } from "antd";
+import { Row } from "antd";
import React, { useContext, useMemo } from "react";
import { Link } from "react-router-dom";
@@ -99,9 +99,10 @@ const COLUMNS: (TableColumnProps & {
hideIfTenant: true,
width: "fit-content",
render: (db: DatabaseRow) => (
-
-
-
+
),
},
];
@@ -159,7 +160,7 @@ export const DatabasesPageV2 = () => {
const tableData = useMemo(
() =>
rawDatabaseMetadataToDatabaseRows(data?.results ?? [], {
- nodeIDToRegion: nodesResp.nodeIDToRegion,
+ nodeStatusByID: nodesResp.nodeStatusByID,
storeIDToNodeID: nodesResp.storeIDToNodeID,
isLoading: nodesResp.isLoading,
}),
@@ -205,13 +206,13 @@ export const DatabasesPageV2 = () => {
+ !settingsLoading && (
-
+ )
}
/>
diff --git a/pkg/ui/workspaces/cluster-ui/src/databasesV2/utils.ts b/pkg/ui/workspaces/cluster-ui/src/databasesV2/utils.ts
index ec3caa6f6474..eb8bfcca3828 100644
--- a/pkg/ui/workspaces/cluster-ui/src/databasesV2/utils.ts
+++ b/pkg/ui/workspaces/cluster-ui/src/databasesV2/utils.ts
@@ -4,6 +4,7 @@
// included in the /LICENSE file.
import { DatabaseMetadata } from "src/api/databases/getDatabaseMetadataApi";
+import { NodeStatus } from "src/api/nodesApi";
import { NodeID, StoreID } from "src/types/clusterTypes";
import { mapStoreIDsToNodeRegions } from "src/util/nodeUtils";
@@ -12,7 +13,7 @@ import { DatabaseRow } from "./databaseTypes";
export const rawDatabaseMetadataToDatabaseRows = (
raw: DatabaseMetadata[],
nodesInfo: {
- nodeIDToRegion: Record<NodeID, string>;
+ nodeStatusByID: Record<NodeID, NodeStatus>;
storeIDToNodeID: Record<StoreID, NodeID>;
isLoading: boolean;
},
@@ -20,7 +21,7 @@ export const rawDatabaseMetadataToDatabaseRows = (
return raw.map((db: DatabaseMetadata): DatabaseRow => {
const nodesByRegion = mapStoreIDsToNodeRegions(
db.storeIds,
- nodesInfo?.nodeIDToRegion,
+ nodesInfo?.nodeStatusByID,
nodesInfo?.storeIDToNodeID,
);
return {
diff --git a/pkg/ui/workspaces/cluster-ui/src/graphs/visualization/visualizations.module.scss b/pkg/ui/workspaces/cluster-ui/src/graphs/visualization/visualizations.module.scss
index 988fd2ce6907..fa3379dd7660 100644
--- a/pkg/ui/workspaces/cluster-ui/src/graphs/visualization/visualizations.module.scss
+++ b/pkg/ui/workspaces/cluster-ui/src/graphs/visualization/visualizations.module.scss
@@ -20,11 +20,15 @@
.visualization-content {
padding: 0 10px 15px;
- .visualization-loading {
- display: flex;
- justify-content: center;
- align-items: center;
- }
+}
+
+.visualization-loading {
+ display: flex;
+ justify-content: center;
+ align-items: center;
+ // These are the dimensions of the linegraph.
+ min-width: 947px;
+ min-height: 300px;
}
.visualization-header {
diff --git a/pkg/ui/workspaces/cluster-ui/src/loading/loading.module.scss b/pkg/ui/workspaces/cluster-ui/src/loading/loading.module.scss
index 9fb28f071c8f..cb0e34f0067f 100644
--- a/pkg/ui/workspaces/cluster-ui/src/loading/loading.module.scss
+++ b/pkg/ui/workspaces/cluster-ui/src/loading/loading.module.scss
@@ -7,6 +7,7 @@
.loading-indicator {
margin: $spacing-medium auto;
+ display: block;
width: 100%;
}
diff --git a/pkg/ui/workspaces/cluster-ui/src/loading/loading.tsx b/pkg/ui/workspaces/cluster-ui/src/loading/loading.tsx
index 95af0e21f85b..055f058b4ff7 100644
--- a/pkg/ui/workspaces/cluster-ui/src/loading/loading.tsx
+++ b/pkg/ui/workspaces/cluster-ui/src/loading/loading.tsx
@@ -141,7 +141,9 @@ export const Loading = (props: React.PropsWithChildren) => {
}
if (props.loading) {
return (
-
+
+
+
);
}
return (
diff --git a/pkg/ui/workspaces/cluster-ui/src/tableDetailsV2/tableOverview.tsx b/pkg/ui/workspaces/cluster-ui/src/tableDetailsV2/tableOverview.tsx
index ee719485f83f..3222463156d8 100644
--- a/pkg/ui/workspaces/cluster-ui/src/tableDetailsV2/tableOverview.tsx
+++ b/pkg/ui/workspaces/cluster-ui/src/tableDetailsV2/tableOverview.tsx
@@ -29,7 +29,7 @@ export const TableOverview: React.FC = ({
const isTenant = clusterDetails.isTenant;
const metadata = tableDetails.metadata;
const {
- nodeIDToRegion,
+ nodeStatusByID,
storeIDToNodeID,
isLoading: nodesLoading,
} = useNodeStatuses();
@@ -42,7 +42,7 @@ export const TableOverview: React.FC = ({
}
const regionsToNodes = mapStoreIDsToNodeRegions(
tableDetails.metadata.storeIds,
- nodeIDToRegion,
+ nodeStatusByID,
storeIDToNodeID,
);
return Object.entries(regionsToNodes)
@@ -89,7 +89,7 @@ export const TableOverview: React.FC = ({
+
{getNodesByRegionDisplayStr()}
}
diff --git a/pkg/ui/workspaces/cluster-ui/src/util/nodeUtils.spec.ts b/pkg/ui/workspaces/cluster-ui/src/util/nodeUtils.spec.ts
index e46166285edf..c54f25f8a9aa 100644
--- a/pkg/ui/workspaces/cluster-ui/src/util/nodeUtils.spec.ts
+++ b/pkg/ui/workspaces/cluster-ui/src/util/nodeUtils.spec.ts
@@ -3,7 +3,8 @@
// Use of this software is governed by the CockroachDB Software License
// included in the /LICENSE file.
-import { StoreID } from "src/types/clusterTypes";
+import { NodeStatus } from "src/api";
+import { NodeID, StoreID } from "src/types/clusterTypes";
import { mapStoreIDsToNodeRegions } from "./nodeUtils";
@@ -12,12 +13,12 @@ describe("nodeUtils", () => {
it("should return a mapping of regions to the nodes that are present in that region based on the provided storeIDs", () => {
const stores = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] as StoreID[];
const clusterNodeIDToRegion = {
- 1: "region1",
- 2: "region2",
- 3: "region1",
- 4: "region2",
- 5: "region1",
- };
+ 1: { region: "region1", stores: [1, 6] },
+ 2: { region: "region2", stores: [2, 7] },
+ 3: { region: "region1", stores: [3, 8] },
+ 4: { region: "region2", stores: [4, 9] },
+ 5: { region: "region1", stores: [5, 10] },
+ } as Record<NodeID, NodeStatus>;
const clusterStoreIDToNodeID = {
1: 1,
2: 2,
diff --git a/pkg/ui/workspaces/cluster-ui/src/util/nodeUtils.ts b/pkg/ui/workspaces/cluster-ui/src/util/nodeUtils.ts
index c3b39ca09668..390243e6ff93 100644
--- a/pkg/ui/workspaces/cluster-ui/src/util/nodeUtils.ts
+++ b/pkg/ui/workspaces/cluster-ui/src/util/nodeUtils.ts
@@ -3,6 +3,7 @@
// Use of this software is governed by the CockroachDB Software License
// included in the /LICENSE file.
+import { NodeStatus } from "src/api";
import { NodeID, StoreID } from "src/types/clusterTypes";
// mapStoreIDsToNodeRegions creates a mapping of regions
@@ -10,7 +11,7 @@ import { NodeID, StoreID } from "src/types/clusterTypes";
// the provided storeIDs.
export const mapStoreIDsToNodeRegions = (
stores: StoreID[],
- clusterNodeIDToRegion: Record<NodeID, string> = {},
+ clusterNodeIDToRegion: Record<NodeID, NodeStatus> = {},
clusterStoreIDToNodeID: Record<StoreID, NodeID> = {},
): Record => {
const nodes = stores.reduce((acc, storeID) => {
@@ -20,7 +21,7 @@ export const mapStoreIDsToNodeRegions = (
const nodesByRegion: Record<string, NodeID[]> = {};
nodes.forEach(nodeID => {
- const region = clusterNodeIDToRegion[nodeID];
+ const region = clusterNodeIDToRegion[nodeID]?.region;
if (!nodesByRegion[region]) {
nodesByRegion[region] = [];
}
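
mapStoreIDsToNodeRegions is a two-step join: resolve each store to its node, dedupe nodes that own several stores, then bucket node IDs by region. The same shape in Go, self-contained with simplified types:

package main

import "fmt"

type (
	NodeID  int
	StoreID int
)

type NodeStatus struct {
	Region string
	Stores []StoreID
}

// mapStoreIDsToNodeRegions mirrors the TypeScript utility: store to
// node, dedupe, then group the node IDs by their region.
func mapStoreIDsToNodeRegions(
	stores []StoreID,
	nodeStatusByID map[NodeID]NodeStatus,
	storeIDToNodeID map[StoreID]NodeID,
) map[string][]NodeID {
	seen := map[NodeID]bool{}
	byRegion := map[string][]NodeID{}
	for _, s := range stores {
		n, ok := storeIDToNodeID[s]
		if !ok || seen[n] {
			continue
		}
		seen[n] = true
		region := nodeStatusByID[n].Region
		byRegion[region] = append(byRegion[region], n)
	}
	return byRegion
}

func main() {
	fmt.Println(mapStoreIDsToNodeRegions(
		[]StoreID{101, 102, 201},
		map[NodeID]NodeStatus{1: {Region: "us-east"}, 2: {Region: "us-west"}},
		map[StoreID]NodeID{101: 1, 102: 1, 201: 2},
	)) // map[us-east:[1] us-west:[2]]
}
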
diff --git a/pkg/ui/workspaces/db-console/BUILD.bazel b/pkg/ui/workspaces/db-console/BUILD.bazel
index 5adcba658453..1646ad68d44b 100644
--- a/pkg/ui/workspaces/db-console/BUILD.bazel
+++ b/pkg/ui/workspaces/db-console/BUILD.bazel
@@ -89,7 +89,6 @@ webpack_bin.webpack_cli(
"db-console-ccl/assets/login-background.png",
"db-console-ccl/assets/not-found.svg",
"db-console-ccl/assets/questionMap.svg",
- "db-console-ccl/assets/spinner.gif",
],
args = [
"--config webpack.config.js",
@@ -140,7 +139,6 @@ webpack_bin.webpack_cli(
"db-console-oss/assets/heroBannerLp.png",
"db-console-oss/assets/login-background.png",
"db-console-oss/assets/not-found.svg",
- "db-console-oss/assets/spinner.gif",
],
args = [
"--config webpack.config.js",
diff --git a/pkg/ui/workspaces/db-console/assets/spinner.gif b/pkg/ui/workspaces/db-console/assets/spinner.gif
deleted file mode 100644
index 83edb0a01ee1..000000000000
Binary files a/pkg/ui/workspaces/db-console/assets/spinner.gif and /dev/null differ
diff --git a/pkg/ui/workspaces/db-console/src/redux/alerts.spec.ts b/pkg/ui/workspaces/db-console/src/redux/alerts.spec.ts
index 9244aea5edf3..53cbeb9d2820 100644
--- a/pkg/ui/workspaces/db-console/src/redux/alerts.spec.ts
+++ b/pkg/ui/workspaces/db-console/src/redux/alerts.spec.ts
@@ -12,7 +12,6 @@ import * as protos from "src/js/protos";
import { cockroach } from "src/js/protos";
import { versionsSelector } from "src/redux/nodes";
import { API_PREFIX } from "src/util/api";
-import { setDataFromServer } from "src/util/dataFromServer";
import fetchMock from "src/util/fetch-mock";
import {
@@ -28,7 +27,6 @@ import {
emailSubscriptionAlertSelector,
clusterPreserveDowngradeOptionDismissedSetting,
clusterPreserveDowngradeOptionOvertimeSelector,
- licenseUpdateNotificationSelector,
} from "./alerts";
import {
livenessReducerObj,
@@ -43,7 +41,6 @@ import { AdminUIState, AppDispatch, createAdminUIStore } from "./state";
import {
VERSION_DISMISSED_KEY,
INSTRUCTIONS_BOX_COLLAPSED_KEY,
- LICENSE_UPDATE_DISMISSED_KEY,
setUIDataKey,
isInFlight,
} from "./uiData";
@@ -256,31 +253,6 @@ describe("alerts", function () {
});
});
- describe("licence update notification", function () {
- it("displays the alert when nothing is done", function () {
- dispatch(setUIDataKey(LICENSE_UPDATE_DISMISSED_KEY, null));
- const alert = licenseUpdateNotificationSelector(state());
- expect(typeof alert).toBe("object");
- expect(alert.level).toEqual(AlertLevel.INFORMATION);
- expect(alert.text).toEqual(
- "Important changes to CockroachDB’s licensing model.",
- );
- });
-
- it("hides the alert when dismissed timestamp is present", function () {
- dispatch(setUIDataKey(LICENSE_UPDATE_DISMISSED_KEY, moment()));
- expect(licenseUpdateNotificationSelector(state())).toBeUndefined();
- });
-
- it("hides the alert when license is enterprise", function () {
- dispatch(setUIDataKey(LICENSE_UPDATE_DISMISSED_KEY, null));
- setDataFromServer({
- LicenseType: "Enterprise",
- } as any);
- expect(licenseUpdateNotificationSelector(state())).toBeUndefined();
- });
- });
-
describe("new version available notification", function () {
it("displays nothing when versions have not yet been loaded", function () {
dispatch(setUIDataKey(VERSION_DISMISSED_KEY, null));
@@ -653,7 +625,6 @@ describe("alerts", function () {
);
dispatch(setUIDataKey(VERSION_DISMISSED_KEY, "blank"));
dispatch(setUIDataKey(INSTRUCTIONS_BOX_COLLAPSED_KEY, false));
- dispatch(setUIDataKey(LICENSE_UPDATE_DISMISSED_KEY, moment()));
dispatch(
versionReducerObj.receiveData({
details: [],
diff --git a/pkg/ui/workspaces/db-console/src/redux/alerts.ts b/pkg/ui/workspaces/db-console/src/redux/alerts.ts
index 132e14360c45..f37df5b3a423 100644
--- a/pkg/ui/workspaces/db-console/src/redux/alerts.ts
+++ b/pkg/ui/workspaces/db-console/src/redux/alerts.ts
@@ -42,7 +42,6 @@ import {
import { LocalSetting } from "./localsettings";
import { AdminUIState, AppDispatch } from "./state";
import {
- LICENSE_UPDATE_DISMISSED_KEY,
VERSION_DISMISSED_KEY,
INSTRUCTIONS_BOX_COLLAPSED_KEY,
saveUIData,
@@ -703,85 +702,6 @@ export const licenseUpdateDismissedLocalSetting = new LocalSetting(
moment(0),
);
-const licenseUpdateDismissedPersistentLoadedSelector = createSelector(
- (state: AdminUIState) => state.uiData,
- uiData =>
- uiData &&
- Object.prototype.hasOwnProperty.call(uiData, LICENSE_UPDATE_DISMISSED_KEY),
-);
-
-const licenseUpdateDismissedPersistentSelector = createSelector(
- (state: AdminUIState) => state.uiData,
- uiData => moment(uiData?.[LICENSE_UPDATE_DISMISSED_KEY]?.data ?? 0),
-);
-
-export const licenseUpdateNotificationSelector = createSelector(
- licenseTypeSelector,
- licenseUpdateDismissedLocalSetting.selector,
- licenseUpdateDismissedPersistentSelector,
- licenseUpdateDismissedPersistentLoadedSelector,
- (
- licenseType,
- licenseUpdateDismissed,
- licenseUpdateDismissedPersistent,
- licenseUpdateDismissedPersistentLoaded,
- ): Alert => {
- // If customer has Enterprise license they don't need to worry about this.
- if (licenseType === "Enterprise") {
- return undefined;
- }
-
- // If the notification has been dismissed based on the session storage
- // timestamp, don't show it.'
- //
- // Note: `licenseUpdateDismissed` is wrapped in `moment()` because
- // the local storage selector won't convert it back from a string.
- // We omit fixing that here since this change is being backported
- // to many versions.
- if (moment(licenseUpdateDismissed).isAfter(moment(0))) {
- return undefined;
- }
-
- // If the notification has been dismissed based on the uiData
- // storage in the cluster, don't show it. Note that this is
- // different from how version upgrade notifications work, this one
- // is dismissed forever and won't return even if you upgrade
- // further or time passes.
- if (
- licenseUpdateDismissedPersistentLoaded &&
- licenseUpdateDismissedPersistent &&
- licenseUpdateDismissedPersistent.isAfter(moment(0))
- ) {
- return undefined;
- }
-
- return {
- level: AlertLevel.INFORMATION,
- title: "Coming November 18, 2024",
- text: "Important changes to CockroachDB’s licensing model.",
- link: docsURL.enterpriseLicenseUpdate,
- dismiss: (dispatch: any) => {
- const dismissedAt = moment();
- // Note(davidh): I haven't been able to find historical context
- // for why some alerts have both a "local" and a "persistent"
- // dismissal. My thinking is that just the persistent dismissal
- // should be adequate, but I'm preserving that behavior here to
- // match the version upgrade notification.
-
- // Dismiss locally.
- dispatch(licenseUpdateDismissedLocalSetting.set(dismissedAt));
- // Dismiss persistently.
- return dispatch(
- saveUIData({
- key: LICENSE_UPDATE_DISMISSED_KEY,
- value: dismissedAt.valueOf(),
- }),
- );
- },
- };
- },
-);
-
/**
* Selector which returns an array of all active alerts which should be
* displayed in the overview list page, these should be non-critical alerts.
@@ -791,7 +711,6 @@ export const overviewListAlertsSelector = createSelector(
staggeredVersionWarningSelector,
clusterPreserveDowngradeOptionOvertimeSelector,
upgradeNotFinalizedWarningSelector,
- licenseUpdateNotificationSelector,
(...alerts: Alert[]): Alert[] => {
return without(alerts, null, undefined);
},
@@ -870,7 +789,6 @@ export function alertDataSync(store: Store) {
const keysToMaybeLoad = [
VERSION_DISMISSED_KEY,
INSTRUCTIONS_BOX_COLLAPSED_KEY,
- LICENSE_UPDATE_DISMISSED_KEY,
];
const keysToLoad = filter(keysToMaybeLoad, key => {
return !(has(uiData, key) || isInFlight(state, key));
diff --git a/pkg/ui/workspaces/db-console/src/redux/uiData.ts b/pkg/ui/workspaces/db-console/src/redux/uiData.ts
index 1b15142f675b..883b53310aca 100644
--- a/pkg/ui/workspaces/db-console/src/redux/uiData.ts
+++ b/pkg/ui/workspaces/db-console/src/redux/uiData.ts
@@ -57,11 +57,6 @@ export class OptInAttributes {
// was last dismissed.
export const VERSION_DISMISSED_KEY = "version_dismissed";
-// LICENSE_UPDATE_DISMISSED_KEY is the uiData key on the server that tracks when the licence
-// update banner was last dismissed. This banner notifies the user that we've changed our
-// licensing if they're deployed without an active license.
-export const LICENSE_UPDATE_DISMISSED_KEY = "license_update_dismissed";
-
// INSTRUCTIONS_BOX_COLLAPSED_KEY is the uiData key on the server that tracks whether the
// instructions box on the cluster viz has been collapsed or not.
export const INSTRUCTIONS_BOX_COLLAPSED_KEY =
diff --git a/pkg/ui/workspaces/db-console/src/util/docs.ts b/pkg/ui/workspaces/db-console/src/util/docs.ts
index eefef41c8f92..de1447f6e34e 100644
--- a/pkg/ui/workspaces/db-console/src/util/docs.ts
+++ b/pkg/ui/workspaces/db-console/src/util/docs.ts
@@ -59,8 +59,6 @@ export let licensingFaqs: string;
export let throttlingFaqs: string;
// Note that these explicitly don't use the current version, since we want to
// link to the most up-to-date documentation available.
-export const enterpriseLicenseUpdate =
- "https://www.cockroachlabs.com/enterprise-license-update/";
export const upgradeCockroachVersion =
"https://www.cockroachlabs.com/docs/stable/upgrade-cockroach-version.html";
export const enterpriseLicensing =
diff --git a/pkg/ui/workspaces/db-console/src/views/cluster/containers/clusterOverview/index.tsx b/pkg/ui/workspaces/db-console/src/views/cluster/containers/clusterOverview/index.tsx
index 3f1e7e27331f..448daf4cbbce 100644
--- a/pkg/ui/workspaces/db-console/src/views/cluster/containers/clusterOverview/index.tsx
+++ b/pkg/ui/workspaces/db-console/src/views/cluster/containers/clusterOverview/index.tsx
@@ -4,6 +4,7 @@
// included in the /LICENSE file.
import { util } from "@cockroachlabs/cluster-ui";
+import { Skeleton } from "antd";
import classNames from "classnames";
import d3 from "d3";
import React from "react";
@@ -11,7 +12,6 @@ import { Helmet } from "react-helmet";
import { connect } from "react-redux";
import { createSelector } from "reselect";
-import spinner from "assets/spinner.gif";
import { refreshNodes, refreshLiveness } from "src/redux/apiReducers";
import { nodeSumsSelector } from "src/redux/nodes";
import { AdminUIState } from "src/redux/state";
@@ -277,21 +277,22 @@ class ClusterSummary extends React.Component {
}
render() {
- const children = [];
-
- if (this.props.loading) {
- children.push( );
- } else {
- children.push(
- ...renderCapacityUsage(this.props.capacityUsage),
- ...renderNodeLiveness(this.props.nodeLiveness),
- ...renderReplicationStatus(this.props.replicationStatus),
- );
- }
+ const children = [
+ ...renderCapacityUsage(this.props.capacityUsage),
+ ...renderNodeLiveness(this.props.nodeLiveness),
+ ...renderReplicationStatus(this.props.replicationStatus),
+ ];
return (
- {React.Children.toArray(children)}
+
+ {React.Children.toArray(children)}
+
);
}
diff --git a/pkg/ui/workspaces/db-console/src/views/shared/components/alertBar/alertBar.spec.tsx b/pkg/ui/workspaces/db-console/src/views/shared/components/alertBar/alertBar.spec.tsx
index 77cb64fb7de3..414b37388ceb 100644
--- a/pkg/ui/workspaces/db-console/src/views/shared/components/alertBar/alertBar.spec.tsx
+++ b/pkg/ui/workspaces/db-console/src/views/shared/components/alertBar/alertBar.spec.tsx
@@ -75,7 +75,7 @@ describe("AlertBar", () => {
expect(wrapper.text()).toContain(
"Your license key expired on September 15th, 2024. " +
- `The cluster will be throttled on ${gracePeriodEnd.format("MMMM Do, YYYY")} unless the license is renewed. Learn more`,
+ `The cluster will be throttled on ${gracePeriodEnd.format("MMMM Do, YYYY")} unless a new license key is added. Learn more`,
);
});
diff --git a/pkg/ui/workspaces/db-console/src/views/shared/components/alertBar/alertBar.tsx b/pkg/ui/workspaces/db-console/src/views/shared/components/alertBar/alertBar.tsx
index b558bfeb429d..73a5c2b06dd7 100644
--- a/pkg/ui/workspaces/db-console/src/views/shared/components/alertBar/alertBar.tsx
+++ b/pkg/ui/workspaces/db-console/src/views/shared/components/alertBar/alertBar.tsx
@@ -131,8 +131,8 @@ export const AlertBar = ({
Your license key expired on{" "}
{licenseExpiryDate.format("MMMM Do, YYYY")}. The cluster will be
- throttled on {gracePeriodEnd.format("MMMM Do, YYYY")} unless the
- license is renewed.
Learn more
+ throttled on {gracePeriodEnd.format("MMMM Do, YYYY")} unless a new
+ license key is added.
Learn more
);
}
diff --git a/pkg/util/encoding/encoding.go b/pkg/util/encoding/encoding.go
index 86fcaa353bf7..71716f1c01a1 100644
--- a/pkg/util/encoding/encoding.go
+++ b/pkg/util/encoding/encoding.go
@@ -3749,19 +3749,3 @@ func BytesPrevish(b []byte, length int) []byte {
copy(buf[bLen:], bytes.Repeat([]byte{0xff}, length-bLen))
return buf
}
-
-// unsafeWrapper is implementation of SafeFormatter. This is used to mark
-// arguments as unsafe for redaction. This would make sure that redact.Unsafe() is implementing SafeFormatter interface
-// without affecting invocations.
-// TODO(aa-joshi): This is a temporary solution to mark arguments as unsafe. We should move/update this into cockroachdb/redact package.
-type unsafeWrapper struct {
- a any
-}
-
-func (uw unsafeWrapper) SafeFormat(w redact.SafePrinter, _ rune) {
- w.Print(redact.Unsafe(uw.a))
-}
-
-func Unsafe(args any) any {
- return unsafeWrapper{a: args}
-}
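
The deleted wrapper existed only so a value forced through redact.Unsafe would still satisfy redact.SafeFormatter; callers can reach for redact.Unsafe directly. A standalone sketch using the public cockroachdb/redact API:

package main

import (
	"fmt"

	"github.com/cockroachdb/redact"
)

func main() {
	host := "db-internal-03"
	// Plain values are unsafe by default and get redaction markers.
	fmt.Println(redact.Sprintf("dialing %s", host))
	// redact.Unsafe forces unsafe treatment even for values whose type
	// would otherwise be considered safe, which is all the deleted
	// unsafeWrapper did via its SafeFormat method.
	fmt.Println(redact.Sprintf("dialing %s", redact.Unsafe(redact.SafeString(host))))
}
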
diff --git a/pkg/util/log/gen/main.go b/pkg/util/log/gen/main.go
index 2fc115b84645..35c1d4c39435 100644
--- a/pkg/util/log/gen/main.go
+++ b/pkg/util/log/gen/main.go
@@ -417,7 +417,7 @@ func (logger{{.Name}}) Shoutf(ctx context.Context, sev Severity, format string,
// verbosity level is active.
{{.Comment -}}
func (logger{{.Name}}) VEvent(ctx context.Context, level Level, msg string) {
- vEventf(ctx, false /* isErr */, 1, level, channel.{{.NAME}}, "%s", msg)
+ vEvent(ctx, false /* isErr */, 1, level, channel.{{.NAME}}, msg)
}
// VEventf either logs a message to the channel (which also outputs to the
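
The generated hunks below all make the same substitution: routing a plain message through the printf path via "%s" is correct but pays for verb parsing and a varargs allocation on a hot tracing path. A minimal illustration, with vEvent/vEventf standing in for the real unexported helpers:

package main

import "fmt"

// vEventf is the printf-style path: it allocates an []interface{} for
// the arguments and runs the formatter over the verb string.
func vEventf(format string, args ...interface{}) {
	fmt.Printf(format+"\n", args...)
}

// vEvent passes the message through verbatim: no verb interpretation,
// no varargs boxing.
func vEvent(msg string) {
	fmt.Println(msg)
}

func main() {
	msg := "wrote 100% of batch"
	vEventf("%s", msg) // correct, but pays the formatting cost
	// vEventf(msg)    // WRONG: the literal '%' would be read as a verb
	vEvent(msg)        // direct delivery of the message
}
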
diff --git a/pkg/util/log/log_channels_generated.go b/pkg/util/log/log_channels_generated.go
index 20f4f9912c61..d66d4140cd53 100644
--- a/pkg/util/log/log_channels_generated.go
+++ b/pkg/util/log/log_channels_generated.go
@@ -972,7 +972,7 @@ func (loggerDev) Shoutf(ctx context.Context, sev Severity, format string, args .
// sensitive operational data.
// See [Configure logs](configure-logs.html#dev-channel).
func (loggerDev) VEvent(ctx context.Context, level Level, msg string) {
- vEventf(ctx, false /* isErr */, 1, level, channel.DEV, "%s", msg)
+ vEvent(ctx, false /* isErr */, 1, level, channel.DEV, msg)
}
// VEventf either logs a message to the channel (which also outputs to the
@@ -1504,7 +1504,7 @@ func (loggerOps) Shoutf(ctx context.Context, sev Severity, format string, args .
// - [Cluster setting](cluster-settings.html) changes
// - [Zone configuration](configure-replication-zones.html) changes
func (loggerOps) VEvent(ctx context.Context, level Level, msg string) {
- vEventf(ctx, false /* isErr */, 1, level, channel.OPS, "%s", msg)
+ vEvent(ctx, false /* isErr */, 1, level, channel.OPS, msg)
}
// VEventf either logs a message to the channel (which also outputs to the
@@ -1939,7 +1939,7 @@ func (loggerHealth) Shoutf(ctx context.Context, sev Severity, format string, arg
// - Range and table leasing events
// - Up- and down-replication, range unavailability
func (loggerHealth) VEvent(ctx context.Context, level Level, msg string) {
- vEventf(ctx, false /* isErr */, 1, level, channel.HEALTH, "%s", msg)
+ vEvent(ctx, false /* isErr */, 1, level, channel.HEALTH, msg)
}
// VEventf either logs a message to the channel (which also outputs to the
@@ -2248,7 +2248,7 @@ func (loggerStorage) Shoutf(ctx context.Context, sev Severity, format string, ar
// The `STORAGE` channel is used to report low-level storage
// layer events (RocksDB/Pebble).
func (loggerStorage) VEvent(ctx context.Context, level Level, msg string) {
- vEventf(ctx, false /* isErr */, 1, level, channel.STORAGE, "%s", msg)
+ vEvent(ctx, false /* isErr */, 1, level, channel.STORAGE, msg)
}
// VEventf either logs a message to the channel (which also outputs to the
@@ -2705,7 +2705,7 @@ func (loggerSessions) Shoutf(ctx context.Context, sev Severity, format string, a
// This is typically configured in "audit" mode, with event
// numbering and synchronous writes.
func (loggerSessions) VEvent(ctx context.Context, level Level, msg string) {
- vEventf(ctx, false /* isErr */, 1, level, channel.SESSIONS, "%s", msg)
+ vEvent(ctx, false /* isErr */, 1, level, channel.SESSIONS, msg)
}
// VEventf either logs a message to the channel (which also outputs to the
@@ -3238,7 +3238,7 @@ func (loggerSqlSchema) Shoutf(ctx context.Context, sev Severity, format string,
// `SQL_SCHEMA` events generally comprise changes to the schema that affect the
// functional behavior of client apps using stored objects.
func (loggerSqlSchema) VEvent(ctx context.Context, level Level, msg string) {
- vEventf(ctx, false /* isErr */, 1, level, channel.SQL_SCHEMA, "%s", msg)
+ vEvent(ctx, false /* isErr */, 1, level, channel.SQL_SCHEMA, msg)
}
// VEventf either logs a message to the channel (which also outputs to the
@@ -3717,7 +3717,7 @@ func (loggerUserAdmin) Shoutf(ctx context.Context, sev Severity, format string,
// This is typically configured in "audit" mode, with event
// numbering and synchronous writes.
func (loggerUserAdmin) VEvent(ctx context.Context, level Level, msg string) {
- vEventf(ctx, false /* isErr */, 1, level, channel.USER_ADMIN, "%s", msg)
+ vEvent(ctx, false /* isErr */, 1, level, channel.USER_ADMIN, msg)
}
// VEventf either logs a message to the channel (which also outputs to the
@@ -4150,7 +4150,7 @@ func (loggerPrivileges) Shoutf(ctx context.Context, sev Severity, format string,
// This is typically configured in "audit" mode, with event
// numbering and synchronous writes.
func (loggerPrivileges) VEvent(ctx context.Context, level Level, msg string) {
- vEventf(ctx, false /* isErr */, 1, level, channel.PRIVILEGES, "%s", msg)
+ vEvent(ctx, false /* isErr */, 1, level, channel.PRIVILEGES, msg)
}
// VEventf either logs a message to the channel (which also outputs to the
@@ -4659,7 +4659,7 @@ func (loggerSensitiveAccess) Shoutf(ctx context.Context, sev Severity, format st
// This is typically configured in "audit" mode, with event
// numbering and synchronous writes.
func (loggerSensitiveAccess) VEvent(ctx context.Context, level Level, msg string) {
- vEventf(ctx, false /* isErr */, 1, level, channel.SENSITIVE_ACCESS, "%s", msg)
+ vEvent(ctx, false /* isErr */, 1, level, channel.SENSITIVE_ACCESS, msg)
}
// VEventf either logs a message to the channel (which also outputs to the
@@ -5056,7 +5056,7 @@ func (loggerSqlExec) Shoutf(ctx context.Context, sev Severity, format string, ar
// `sql.log.all_statements.enabled` [cluster setting](cluster-settings.html))
// - uncaught Go panic errors during the execution of a SQL statement.
func (loggerSqlExec) VEvent(ctx context.Context, level Level, msg string) {
- vEventf(ctx, false /* isErr */, 1, level, channel.SQL_EXEC, "%s", msg)
+ vEvent(ctx, false /* isErr */, 1, level, channel.SQL_EXEC, msg)
}
// VEventf either logs a message to the channel (which also outputs to the
@@ -5501,7 +5501,7 @@ func (loggerSqlPerf) Shoutf(ctx context.Context, sev Severity, format string, ar
// with versions prior to v21.1, where the corresponding events
// were redirected to separate files.
func (loggerSqlPerf) VEvent(ctx context.Context, level Level, msg string) {
- vEventf(ctx, false /* isErr */, 1, level, channel.SQL_PERF, "%s", msg)
+ vEvent(ctx, false /* isErr */, 1, level, channel.SQL_PERF, msg)
}
// VEventf either logs a message to the channel (which also outputs to the
@@ -5852,7 +5852,7 @@ func (loggerSqlInternalPerf) Shoutf(ctx context.Context, sev Severity, format st
// channel so as to not pollute the `SQL_PERF` logging output with
// internal troubleshooting details.
func (loggerSqlInternalPerf) VEvent(ctx context.Context, level Level, msg string) {
- vEventf(ctx, false /* isErr */, 1, level, channel.SQL_INTERNAL_PERF, "%s", msg)
+ vEvent(ctx, false /* isErr */, 1, level, channel.SQL_INTERNAL_PERF, msg)
}
// VEventf either logs a message to the channel (which also outputs to the
@@ -6173,7 +6173,7 @@ func (loggerTelemetry) Shoutf(ctx context.Context, sev Severity, format string,
// feature usage within CockroachDB and anonymizes any application-
// specific data.
func (loggerTelemetry) VEvent(ctx context.Context, level Level, msg string) {
- vEventf(ctx, false /* isErr */, 1, level, channel.TELEMETRY, "%s", msg)
+ vEvent(ctx, false /* isErr */, 1, level, channel.TELEMETRY, msg)
}
// VEventf either logs a message to the channel (which also outputs to the
@@ -6492,7 +6492,7 @@ func (loggerKvDistribution) Shoutf(ctx context.Context, sev Severity, format str
// replicas between stores in the cluster, or adding (removing) replicas to
// ranges.
func (loggerKvDistribution) VEvent(ctx context.Context, level Level, msg string) {
- vEventf(ctx, false /* isErr */, 1, level, channel.KV_DISTRIBUTION, "%s", msg)
+ vEvent(ctx, false /* isErr */, 1, level, channel.KV_DISTRIBUTION, msg)
}
// VEventf either logs a message to the channel (which also outputs to the
diff --git a/pkg/util/log/redact.go b/pkg/util/log/redact.go
index 54ff365bde97..cb4c7de33a85 100644
--- a/pkg/util/log/redact.go
+++ b/pkg/util/log/redact.go
@@ -135,7 +135,7 @@ func maybeRedactEntry(payload entryPayload, editor redactEditor) (res entryPaylo
func init() {
// We consider booleans and numeric values to be always safe for
- // reporting. A log call can opt out by using encoding.Unsafe() around
+ // reporting. A log call can opt out by using redact.Unsafe() around
// a value that would be otherwise considered safe.
redact.RegisterSafeType(reflect.TypeOf(true)) // bool
redact.RegisterSafeType(reflect.TypeOf(123)) // int
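
A small, hypothetical demo of the opt-out described in the corrected comment above: once a type is registered as safe it prints unmarked, and redact.Unsafe() re-marks individual values (sketch, not part of the patch):

package main

import (
	"fmt"
	"reflect"

	"github.com/cockroachdb/redact"
)

func main() {
	redact.RegisterSafeType(reflect.TypeOf(123))  // ints are now considered safe
	fmt.Println(redact.Sprint(42))                // 42 (no redaction markers)
	fmt.Println(redact.Sprint(redact.Unsafe(42))) // ‹42› (explicitly opted out)
}
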
diff --git a/pkg/util/log/test_log_scope.go b/pkg/util/log/test_log_scope.go
index 596175d768ec..e3f4f5e5c022 100644
--- a/pkg/util/log/test_log_scope.go
+++ b/pkg/util/log/test_log_scope.go
@@ -62,7 +62,7 @@ type tShim interface {
// Scope creates a TestLogScope which corresponds to the lifetime of a
// temporary logging directory. If -show-logs was passed on the
-// // command line, this is a no-op. Otherwise, it behaves
+// command line, this is a no-op. Otherwise, it behaves
// like ScopeWithoutShowLogs().
//
// See the documentation of ScopeWithoutShowLogs() for API usage and
diff --git a/pkg/util/log/trace.go b/pkg/util/log/trace.go
index 6c09fb2e110e..1174ce8a3ae4 100644
--- a/pkg/util/log/trace.go
+++ b/pkg/util/log/trace.go
@@ -98,6 +98,16 @@ func Eventf(ctx context.Context, format string, args ...interface{}) {
eventInternal(sp, false /* isErr */, &entry)
}
+// NOTE: we maintain a vEvent function separate from vEventf, instead of having
+// all VEvent callers invoke vEventf directly, so that the no-op path does not
+// pay for the heap allocation that occurs when `msg` escapes by being packed
+// into a vararg slice.
+func vEvent(ctx context.Context, isErr bool, depth int, level Level, ch Channel, msg string) {
+ if VDepth(level, 1+depth) || getSpan(ctx) != nil {
+ vEventf(ctx, isErr, 1+depth, level, ch, "%s", msg)
+ }
+}
+
func vEventf(
ctx context.Context,
isErr bool,
@@ -114,12 +124,7 @@ func vEventf(
sev = severity.ERROR
}
logfDepth(ctx, 1+depth, sev, ch, format, args...)
- } else {
- sp := getSpan(ctx)
- if sp == nil {
- // Nothing to log. Skip the work.
- return
- }
+ } else if sp := getSpan(ctx); sp != nil {
entry := makeUnstructuredEntry(ctx,
severity.INFO, /* unused for trace events */
channel.DEV, /* unused for trace events */
@@ -134,7 +139,7 @@ func vEventf(
// active trace) or to the trace alone, depending on whether the specified
// verbosity level is active.
func VEvent(ctx context.Context, level Level, msg string) {
- vEventf(ctx, false /* isErr */, 1, level, channel.DEV, "%s", msg)
+ vEvent(ctx, false /* isErr */, 1, level, channel.DEV, msg)
}
// VEventf either logs a message to the DEV channel (which also outputs to the
@@ -154,7 +159,7 @@ func VEventfDepth(ctx context.Context, depth int, level Level, format string, ar
// outputs to the active trace) or to the trace alone, depending on whether
// the specified verbosity level is active.
func VErrEvent(ctx context.Context, level Level, msg string) {
- vEventf(ctx, true /* isErr */, 1, level, channel.DEV, "%s", msg)
+ vEvent(ctx, true /* isErr */, 1, level, channel.DEV, msg)
}
// VErrEventf either logs an error message to the DEV Channel (which also
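
The shape of the fix above is worth spelling out. A minimal sketch of the pattern, with hypothetical names (cheapGuard and expensiveFormat are not from the patch): the guard takes msg as a plain string, so the []interface{} boxing that a vararg call forces only happens once we know the event will actually be consumed.

package main

import "fmt"

// cheapGuard mirrors vEvent: no vararg parameter, so the disabled path
// performs no heap allocation for the message.
func cheapGuard(enabled bool, msg string) {
	if enabled {
		expensiveFormat("%s", msg) // boxing into args happens only here
	}
}

//go:noinline
func expensiveFormat(format string, args ...interface{}) {
	_ = fmt.Sprintf(format, args...)
}

func main() {
	cheapGuard(false, "dropped for free")
	cheapGuard(true, "formatted and used")
}
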
diff --git a/pkg/util/log/trace_test.go b/pkg/util/log/trace_test.go
index 381b0889b347..5d1e71e86275 100644
--- a/pkg/util/log/trace_test.go
+++ b/pkg/util/log/trace_test.go
@@ -38,3 +38,21 @@ func TestTrace(t *testing.T) {
t.Fatal(err)
}
}
+
+// BenchmarkVEventNoop measures the cost of a VEvent call when neither verbose
+// logging nor tracing is enabled.
+func BenchmarkVEventNoop(b *testing.B) {
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ VEvent(context.Background(), 1, "should be free")
+ }
+}
+
+// BenchmarkVEventfNoop measures the cost of a VEventf call when neither verbose
+// logging nor tracing is enabled.
+func BenchmarkVEventfNoop(b *testing.B) {
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ VEventf(context.Background(), 1, "%s", "should be free")
+ }
+}
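
To confirm the new fast path is allocation-free, the benchmarks can be run with `go test -bench VEvent -benchmem ./pkg/util/log`, or driven programmatically. A hypothetical helper (assumed to live alongside the benchmarks above) might look like:

package log

import (
	"fmt"
	"testing"
)

func reportVEventAllocs() {
	res := testing.Benchmark(BenchmarkVEventNoop)
	fmt.Println("allocs/op:", res.AllocsPerOp()) // expected to be 0 after this change
}
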
diff --git a/pkg/util/num32/BUILD.bazel b/pkg/util/num32/BUILD.bazel
deleted file mode 100644
index 6ae398e9d666..000000000000
--- a/pkg/util/num32/BUILD.bazel
+++ /dev/null
@@ -1,19 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
-
-go_library(
- name = "num32",
- srcs = [
- "doc.go",
- "vec.go",
- ],
- importpath = "github.com/cockroachdb/cockroach/pkg/util/num32",
- visibility = ["//visibility:public"],
- deps = ["@com_github_cockroachdb_errors//:errors"],
-)
-
-go_test(
- name = "num32_test",
- srcs = ["vec_test.go"],
- embed = [":num32"],
- deps = ["@com_github_stretchr_testify//require"],
-)
diff --git a/pkg/util/num32/doc.go b/pkg/util/num32/doc.go
deleted file mode 100644
index e2b94a62fd25..000000000000
--- a/pkg/util/num32/doc.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2024 The Cockroach Authors.
-//
-// Use of this software is governed by the CockroachDB Software License
-// included in the /LICENSE file.
-
-/*
-Package num32 contains basic numeric functions that operate on scalar, vector,
-and matrix float32 values. Inputs and outputs deliberately use simple float
-types so that they can be used in multiple contexts. It uses the gonum library
-when possible, since it offers assembly language implementations of various
-useful primitives.
-
-Using the same convention as gonum, when a slice is being modified in place, it
-has the name dst and the function does not return a value.
-
-Where possible, functions in this package are written with the assumption that
-the caller prevents bad input. They will panic with assertion errors if this is
-not the case, rather than returning error values. Callers should generally have
-panic recovery logic further up the stack to gracefully handle these assertions,
-as they indicate buggy code.
-*/
-package num32
diff --git a/pkg/util/num32/vec.go b/pkg/util/num32/vec.go
deleted file mode 100644
index 8db42ed64bbf..000000000000
--- a/pkg/util/num32/vec.go
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2024 The Cockroach Authors.
-//
-// Use of this software is governed by the CockroachDB Software License
-// included in the /LICENSE file.
-
-package num32
-
-import (
- "math"
-
- "github.com/cockroachdb/errors"
-)
-
-// L1Distance returns the L1 norm of s - t, which is the Manhattan distance
-// between the two vectors.
-func L1Distance(s []float32, t []float32) float32 {
- checkDims(s, t)
- var distance float32
- for i := range s {
- diff := s[i] - t[i]
- distance += float32(math.Abs(float64(diff)))
- }
- return distance
-}
-
-// L2SquaredDistance returns the squared L2 norm of s - t, which is the squared
-// Euclidean distance between the two vectors. Comparing squared distance is
-// equivalent to comparing distance, but the squared distance avoids an
-// expensive square-root operation.
-func L2SquaredDistance(s, t []float32) float32 {
- checkDims(s, t)
- var distance float32
- for i := range s {
- diff := s[i] - t[i]
- distance += diff * diff
- }
- return distance
-}
-
-// InnerProduct returns the inner product of t1 and t2, also called the dot
-// product.
-func InnerProduct(s []float32, t []float32) float32 {
- checkDims(s, t)
- var distance float32
- for i := range s {
- distance += s[i] * t[i]
- }
- return distance
-}
-
-func checkDims(v []float32, v2 []float32) {
- if len(v) != len(v2) {
- panic(errors.AssertionFailedf("different vector dimensions %d and %d", len(v), len(v2)))
- }
-}
diff --git a/pkg/util/num32/vec_test.go b/pkg/util/num32/vec_test.go
deleted file mode 100644
index bfc336f685bd..000000000000
--- a/pkg/util/num32/vec_test.go
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright 2024 The Cockroach Authors.
-//
-// Use of this software is governed by the CockroachDB Software License
-// included in the /LICENSE file.
-
-package num32
-
-import (
- "math"
- "testing"
-
- "github.com/stretchr/testify/require"
-)
-
-var NaN32 = float32(math.NaN())
-var Inf32 = float32(math.Inf(1))
-
-func TestDistances(t *testing.T) {
- // Test L1, L2, Cosine distance.
- testCases := []struct {
- v1 []float32
- v2 []float32
- l1 float32
- l2s float32
- panics bool
- }{
- {v1: []float32{}, v2: []float32{}, l1: 0, l2s: 0},
- {v1: []float32{1, 2, 3}, v2: []float32{4, 5, 6}, l1: 9, l2s: 27},
- {v1: []float32{-1, -2, -3}, v2: []float32{-4, -5, -6}, l1: 9, l2s: 27},
- {v1: []float32{1, 2, 3}, v2: []float32{1, 2, 3}, l1: 0, l2s: 0},
- {v1: []float32{1, 2, 3}, v2: []float32{1, 2, 4}, l1: 1, l2s: 1},
- {v1: []float32{NaN32}, v2: []float32{1}, l1: NaN32, l2s: NaN32},
- {v1: []float32{Inf32}, v2: []float32{1}, l1: Inf32, l2s: Inf32},
- {v1: []float32{1, 2}, v2: []float32{3, 4, 5}, panics: true},
- }
-
- for _, tc := range testCases {
- if !tc.panics {
- l1 := L1Distance(tc.v1, tc.v2)
- l2s := L2SquaredDistance(tc.v1, tc.v2)
- require.InDelta(t, tc.l1, l1, 0.000001)
- require.InDelta(t, tc.l2s, l2s, 0.000001)
- } else {
- require.Panics(t, func() { L1Distance(tc.v1, tc.v2) })
- require.Panics(t, func() { L2SquaredDistance(tc.v1, tc.v2) })
- }
- }
-}
-
-func TestInnerProduct(t *testing.T) {
- // Test inner product and negative inner product
- testCases := []struct {
- v1 []float32
- v2 []float32
- ip float32
- panics bool
- }{
- {v1: []float32{}, v2: []float32{}, ip: 0},
- {v1: []float32{1, 2, 3}, v2: []float32{4, 5, 6}, ip: 32},
- {v1: []float32{-1, -2, -3}, v2: []float32{-4, -5, -6}, ip: 32},
- {v1: []float32{0, 0, 0}, v2: []float32{0, 0, 0}, ip: 0},
- {v1: []float32{1, 2, 3}, v2: []float32{1, 2, 3}, ip: 14},
- {v1: []float32{1, 2, 3}, v2: []float32{1, 2, 4}, ip: 17},
- {v1: []float32{NaN32}, v2: []float32{1}, ip: NaN32},
- {v1: []float32{Inf32}, v2: []float32{1}, ip: Inf32},
- {v1: []float32{1, 2}, v2: []float32{3, 4, 5}, panics: true},
- }
-
- for _, tc := range testCases {
- if !tc.panics {
- ip := InnerProduct(tc.v1, tc.v2)
- require.InDelta(t, tc.ip, ip, 0.000001)
- } else {
- require.Panics(t, func() { InnerProduct(tc.v1, tc.v2) })
- }
- }
-}
diff --git a/pkg/util/vector/BUILD.bazel b/pkg/util/vector/BUILD.bazel
index df2ab07dfce5..f4aeca51ac43 100644
--- a/pkg/util/vector/BUILD.bazel
+++ b/pkg/util/vector/BUILD.bazel
@@ -1,52 +1,23 @@
-load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library")
-load("@rules_proto//proto:defs.bzl", "proto_library")
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "vector",
- srcs = [
- "vector.go",
- "vector_set.go",
- ],
- embed = [":vector_go_proto"],
+ srcs = ["vector.go"],
importpath = "github.com/cockroachdb/cockroach/pkg/util/vector",
visibility = ["//visibility:public"],
deps = [
"//pkg/sql/pgwire/pgcode",
"//pkg/sql/pgwire/pgerror",
"//pkg/util/encoding",
- "//pkg/util/num32",
- "@com_github_cockroachdb_errors//:errors",
],
)
go_test(
name = "vector_test",
- srcs = [
- "vector_set_test.go",
- "vector_test.go",
- ],
+ srcs = ["vector_test.go"],
embed = [":vector"],
deps = [
"//pkg/util/randutil",
"@com_github_stretchr_testify//assert",
- "@com_github_stretchr_testify//require",
],
)
-
-proto_library(
- name = "vector_proto",
- srcs = ["vector.proto"],
- strip_import_prefix = "/pkg",
- visibility = ["//visibility:public"],
- deps = ["@com_github_gogo_protobuf//gogoproto:gogo_proto"],
-)
-
-go_proto_library(
- name = "vector_go_proto",
- compilers = ["//pkg/cmd/protoc-gen-gogoroach:protoc-gen-gogoroach_compiler"],
- importpath = "github.com/cockroachdb/cockroach/pkg/util/vector",
- proto = ":vector_proto",
- visibility = ["//visibility:public"],
- deps = ["@com_github_gogo_protobuf//gogoproto"],
-)
diff --git a/pkg/util/vector/vector.go b/pkg/util/vector/vector.go
index 51789e1b589e..540ebfa86818 100644
--- a/pkg/util/vector/vector.go
+++ b/pkg/util/vector/vector.go
@@ -14,7 +14,6 @@ import (
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror"
"github.com/cockroachdb/cockroach/pkg/util/encoding"
- "github.com/cockroachdb/cockroach/pkg/util/num32"
)
// MaxDim is the maximum number of dimensions a vector can have.
@@ -64,15 +63,6 @@ func ParseVector(input string) (T, error) {
return vector, nil
}
-// AsSet returns this vector a set of one vector.
-func (v T) AsSet() Set {
- return Set{
- Dims: len(v),
- Count: 1,
- Data: v[:len(v):len(v)],
- }
-}
-
// String implements the fmt.Stringer interface.
func (v T) String() string {
var sb strings.Builder
@@ -138,12 +128,24 @@ func Decode(b []byte) (ret T, err error) {
return ret, nil
}
+func checkDims(t T, t2 T) error {
+ if len(t) != len(t2) {
+ return pgerror.Newf(pgcode.DataException, "different vector dimensions %d and %d", len(t), len(t2))
+ }
+ return nil
+}
+
// L1Distance returns the L1 (Manhattan) distance between t and t2.
func L1Distance(t T, t2 T) (float64, error) {
if err := checkDims(t, t2); err != nil {
return 0, err
}
- return float64(num32.L1Distance(t, t2)), nil
+ var distance float32
+ for i := range len(t) {
+ diff := t[i] - t2[i]
+ distance += float32(math.Abs(float64(diff)))
+ }
+ return float64(distance), nil
}
// L2Distance returns the Euclidean distance between t and t2.
@@ -151,36 +153,29 @@ func L2Distance(t T, t2 T) (float64, error) {
if err := checkDims(t, t2); err != nil {
return 0, err
}
+ var distance float32
+ for i := range len(t) {
+ diff := t[i] - t2[i]
+ distance += diff * diff
+ }
// TODO(queries): check for overflow and validate intermediate result if needed.
- return math.Sqrt(float64(num32.L2SquaredDistance(t, t2))), nil
+ return math.Sqrt(float64(distance)), nil
}
-// CosDistance returns the cosine distance between t and t2. This represents the
-// similarity between the two vectors, ranging from 0 (most similar) to 2 (least
-// similar). Only the angle between the vectors matters; the norms (magnitudes)
-// are irrelevant.
+// CosDistance returns the cosine distance between t and t2, ranging from 0
+// (most similar) to 2 (least similar).
func CosDistance(t T, t2 T) (float64, error) {
if err := checkDims(t, t2); err != nil {
return 0, err
}
-
- // Compute the cosine of the angle between the two vectors as their dot
- // product divided by the product of their norms:
- // t·t2
- // -----------
- // ||t|| ||t2||
- var dot, normA, normB float32
- for i := range t {
- dot += t[i] * t2[i]
+ var distance, normA, normB float32
+ for i := range len(t) {
+ distance += t[i] * t2[i]
normA += t[i] * t[i]
normB += t2[i] * t2[i]
}
-
- // Use sqrt(a * b) over sqrt(a) * sqrt(b) to compute norms.
- similarity := float64(dot) / math.Sqrt(float64(normA)*float64(normB))
-
- // Cosine distance = 1 - cosine similarity. Ensure that similarity always
- // stays within [-1, 1] despite any floating point arithmetic error.
+ // Use sqrt(a * b) over sqrt(a) * sqrt(b) to compute the product of norms.
+ similarity := float64(distance) / math.Sqrt(float64(normA)*float64(normB))
+ // Keep similarity within [-1, 1] despite floating-point rounding error.
if similarity > 1 {
similarity = 1
} else if similarity < -1 {
@@ -194,7 +189,11 @@ func InnerProduct(t T, t2 T) (float64, error) {
if err := checkDims(t, t2); err != nil {
return 0, err
}
- return float64(num32.InnerProduct(t, t2)), nil
+ var distance float32
+ for i := range len(t) {
+ distance += t[i] * t2[i]
+ }
+ return float64(distance), nil
}
// NegInnerProduct returns the negative inner product of t1 and t2.
@@ -282,10 +281,3 @@ func Random(rng *rand.Rand) T {
}
return v
}
-
-func checkDims(t T, t2 T) error {
- if len(t) != len(t2) {
- return pgerror.Newf(pgcode.DataException, "different vector dimensions %d and %d", len(t), len(t2))
- }
- return nil
-}
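
As a sanity check on the inlined math above, here is a standalone sketch reproducing the cosine-distance value used by the tests for {1,2,3} vs {4,5,6}: dot = 32, squared norms 14 and 77, so similarity = 32/sqrt(14*77) ≈ 0.974632 and distance ≈ 0.025368.

package main

import (
	"fmt"
	"math"
)

func main() {
	a := []float32{1, 2, 3}
	b := []float32{4, 5, 6}
	var dot, na, nb float32
	for i := range a {
		dot += a[i] * b[i] // inner product
		na += a[i] * a[i]  // squared norm of a
		nb += b[i] * b[i]  // squared norm of b
	}
	sim := float64(dot) / math.Sqrt(float64(na)*float64(nb))
	fmt.Printf("%.8f\n", 1-sim) // ≈ 0.02536815, matching TestDistances below
}
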
diff --git a/pkg/util/vector/vector.proto b/pkg/util/vector/vector.proto
deleted file mode 100644
index 06ffdd8ba5a6..000000000000
--- a/pkg/util/vector/vector.proto
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2024 The Cockroach Authors.
-//
-// Use of this software is governed by the CockroachDB Software License
-// included in the /LICENSE file.
-
-syntax = "proto3";
-package cockroach.util.vector;
-option go_package = "github.com/cockroachdb/cockroach/pkg/util/vector";
-
-import "gogoproto/gogo.proto";
-
-option (gogoproto.goproto_getters_all) = false;
-
-// Set is a set of float32 vectors of equal dimension. Vectors in the set are
-// stored contiguously in a slice, in row-wise order. They are assumed to be
-// unordered; some methods do not preserve ordering.
-message Set {
- // Dims is the number of dimensions of each vector in the set.
- int64 dims = 1 [(gogoproto.casttype) = "int"];
- // Count is the number of vectors in the set.
- int64 count = 2 [(gogoproto.casttype) = "int"];
- // Data is a float32 slice that contains all vectors, laid out contiguously in
- // row-wise order in memory.
- // NB: Avoid using this field directly, instead preferring to use the At
- // function to access individual vectors.
- repeated float data = 3;
-}
diff --git a/pkg/util/vector/vector_set.go b/pkg/util/vector/vector_set.go
deleted file mode 100644
index e2fa666704e1..000000000000
--- a/pkg/util/vector/vector_set.go
+++ /dev/null
@@ -1,120 +0,0 @@
-// Copyright 2024 The Cockroach Authors.
-//
-// Use of this software is governed by the CockroachDB Software License
-// included in the /LICENSE file.
-
-package vector
-
-import (
- "slices"
-
- "github.com/cockroachdb/errors"
-)
-
-// MakeSet constructs a new empty vector set with the given number of
-// dimensions. New vectors can be added via the Add or AddSet methods.
-func MakeSet(dims int) Set {
- return Set{Dims: dims}
-}
-
-// MakeSetFromRawData constructs a new vector set from a raw slice of vectors.
-// The vectors in the slice have the given number of dimensions and are laid out
-// contiguously in row-wise order.
-// NB: The data slice is directly used rather than copied; any outside changes
-// to it will be reflected in the vector set.
-func MakeSetFromRawData(data []float32, dims int) Set {
- if len(data)%dims != 0 {
- panic(errors.AssertionFailedf(
- "data length %d is not a multiple of %d dimensions", len(data), dims))
- }
- return Set{
- Dims: dims,
- Count: len(data) / dims,
- Data: data,
- }
-}
-
-// At returns the vector at the given offset in the set.
-//
-//gcassert:inline
-func (vs *Set) At(offset int) T {
- start := offset * vs.Dims
- end := start + vs.Dims
- return vs.Data[start:end:end]
-}
-
-// SplitAt divides the vector set into two subsets at the given offset. This
-// vector set is updated to contain only the vectors before the split point, and
-// the returned set contains only the vectors on or after the split point.
-func (vs *Set) SplitAt(offset int) Set {
- if offset > vs.Count {
- panic(errors.AssertionFailedf(
- "split point %d cannot be greater than set size %d", offset, vs.Count))
- }
-
- split := offset * vs.Dims
- other := Set{
- Dims: vs.Dims,
- Count: vs.Count - offset,
- Data: vs.Data[split:],
- }
-
- // Specify capacity of the slice so that it's safe to add vectors to this
- // set without impacting the returned set.
- vs.Data = vs.Data[:split:split]
- vs.Count = offset
- return other
-}
-
-// Add appends a new vector to the set.
-func (vs *Set) Add(v T) {
- if vs.Dims != len(v) {
- panic(errors.AssertionFailedf(
- "cannot add vector with %d dimensions to a set with %d dimensions", len(v), vs.Dims))
- }
- vs.Data = append(vs.Data, v...)
- vs.Count++
-}
-
-// AddSet appends all vectors from the given set to this set.
-func (vs *Set) AddSet(vectors *Set) {
- if vs.Dims != vectors.Dims {
- panic(errors.AssertionFailedf(
- "cannot add vector set with %d dimensions to a set with %d dimensions",
- vectors.Dims, vs.Dims))
- }
- vs.Data = append(vs.Data, vectors.Data...)
- vs.Count += vectors.Count
-}
-
-// AddZero adds the given count of zero vectors to this set.
-func (vs *Set) AddZero(count int) {
- vs.Data = slices.Grow(vs.Data, count*vs.Dims)
- vs.Count += count
- start := len(vs.Data)
- end := vs.Count * vs.Dims
- vs.Data = vs.Data[:end]
- for i := start; i < end; i++ {
- vs.Data[i] = 0
- }
-}
-
-// ReplaceWithLast removes the vector at the given offset from the set,
-// replacing it with the last vector in the set. The modified set has one less
-// element and the last vector's position changes.
-func (vs *Set) ReplaceWithLast(offset int) {
- targetStart := offset * vs.Dims
- sourceEnd := len(vs.Data)
- copy(vs.Data[targetStart:targetStart+vs.Dims], vs.Data[sourceEnd-vs.Dims:sourceEnd])
- vs.Data = vs.Data[:sourceEnd-vs.Dims]
- vs.Count--
-}
-
-// EnsureCapacity grows the underlying data slice if needed to ensure the
-// requested capacity. This is useful to prevent unnecessary resizing when it's
-// known up-front how big the vector set will need to get.
-func (vs *Set) EnsureCapacity(capacity int) {
- if vs.Count < capacity {
- vs.Data = slices.Grow(vs.Data, (capacity-vs.Count)*vs.Dims)
- }
-}
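
The deleted Set stored all vectors in one contiguous float32 slice in row-wise order, so At() reduces to slice arithmetic. A small standalone sketch of that layout (illustrative only):

package main

import "fmt"

func main() {
	// Three 2-D vectors laid out row-wise, as the deleted Set.Data was.
	data := []float32{1, 2, 5, 3, 6, 6}
	dims := 2
	at := func(i int) []float32 {
		start := i * dims
		end := start + dims
		// The full slice expression caps the result so appends cannot
		// clobber the neighboring vector.
		return data[start:end:end]
	}
	fmt.Println(at(1)) // [5 3]
}
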
diff --git a/pkg/util/vector/vector_set_test.go b/pkg/util/vector/vector_set_test.go
deleted file mode 100644
index 6d2a47990fd4..000000000000
--- a/pkg/util/vector/vector_set_test.go
+++ /dev/null
@@ -1,108 +0,0 @@
-// Copyright 2024 The Cockroach Authors.
-//
-// Use of this software is governed by the CockroachDB Software License
-// included in the /LICENSE file.
-
-package vector
-
-import (
- "testing"
-
- "github.com/stretchr/testify/require"
-)
-
-func TestVectorSet(t *testing.T) {
- vs := MakeSet(2)
- require.Equal(t, 2, vs.Dims)
- require.Equal(t, 0, vs.Count)
-
- // Add methods.
- v1 := T{1, 2}
- v2 := T{5, 3}
- v3 := T{6, 6}
- vs.Add(v1)
- vs.Add(v2)
- vs.Add(v3)
- require.Equal(t, 3, vs.Count)
- require.Equal(t, []float32{1, 2, 5, 3, 6, 6}, vs.Data)
-
- vs.AddSet(&vs)
- require.Equal(t, 6, vs.Count)
- require.Equal(t, []float32{1, 2, 5, 3, 6, 6, 1, 2, 5, 3, 6, 6}, vs.Data)
-
- vs.AddZero(2)
- vs.AddZero(0)
- require.Equal(t, 8, vs.Count)
- require.Equal(t, []float32{1, 2, 5, 3, 6, 6, 1, 2, 5, 3, 6, 6, 0, 0, 0, 0}, vs.Data)
-
- // ReplaceWithLast.
- vs.ReplaceWithLast(1)
- vs.ReplaceWithLast(4)
- vs.ReplaceWithLast(5)
- require.Equal(t, 5, vs.Count)
- require.Equal(t, []float32{1, 2, 0, 0, 6, 6, 1, 2, 0, 0}, vs.Data)
-
- // Add zero again, to ensure that reusing memory still zeroes it.
- vs.AddZero(1)
- require.Equal(t, []float32{1, 2, 0, 0, 6, 6, 1, 2, 0, 0, 0, 0}, vs.Data)
-
- vs3 := MakeSetFromRawData(vs.Data, 2)
- require.Equal(t, vs, vs3)
-
- // Ensure capacity.
- vs4 := MakeSet(3)
- vs4.EnsureCapacity(5)
- require.Equal(t, 0, len(vs4.Data))
- require.GreaterOrEqual(t, cap(vs4.Data), 15)
- vs4.AddZero(2)
- require.Equal(t, 2, vs4.Count)
- require.Equal(t, 6, len(vs4.Data))
-
- // AsSet.
- vs5 := T{1, 2, 3}.AsSet()
- require.Equal(t, 3, cap(vs5.Data))
- vs4.AddSet(&vs5)
- require.Equal(t, 3, vs4.Count)
- require.Equal(t, []float32{0, 0, 0, 0, 0, 0, 1, 2, 3}, vs4.Data)
-
- // SplitAt.
- vs6 := MakeSetFromRawData([]float32{1, 2, 3, 4, 5, 6}, 2)
- vs7 := vs6.SplitAt(0)
- require.Equal(t, 0, vs6.Count)
- require.Equal(t, []float32{}, vs6.Data)
- require.Equal(t, 3, vs7.Count)
- require.Equal(t, []float32{1, 2, 3, 4, 5, 6}, vs7.Data)
-
- // Append to vs6 and ensure that it does not affect vs7.
- vs6.Add([]float32{7, 8})
- require.Equal(t, []float32{1, 2, 3, 4, 5, 6}, vs7.Data)
-
- vs8 := vs7.SplitAt(2)
- require.Equal(t, 2, vs7.Count)
- require.Equal(t, []float32{1, 2, 3, 4}, vs7.Data)
- require.Equal(t, 1, vs8.Count)
- require.Equal(t, []float32{5, 6}, vs8.Data)
-
- vs9 := vs7.SplitAt(2)
- require.Equal(t, 2, vs7.Count)
- require.Equal(t, []float32{1, 2, 3, 4}, vs7.Data)
- require.Equal(t, 0, vs9.Count)
- require.Equal(t, []float32{}, vs9.Data)
-
- // Check that invalid operations will panic.
- vs11 := MakeSetFromRawData([]float32{1, 2, 3, 4, 5, 6}, 2)
- require.Panics(t, func() { vs11.At(-1) })
- require.Panics(t, func() { vs11.SplitAt(-1) })
- require.Panics(t, func() { vs11.AddZero(-1) })
- require.Panics(t, func() { vs11.AddSet(nil) })
- require.Panics(t, func() { vs11.ReplaceWithLast(-1) })
-
- vs12 := MakeSet(2)
- require.Panics(t, func() { vs12.At(0) })
- require.Panics(t, func() { vs12.SplitAt(1) })
- require.Panics(t, func() { vs12.ReplaceWithLast(0) })
-
- vs13 := MakeSet(-1)
- require.Panics(t, func() { vs13.Add(v1) })
- require.Panics(t, func() { vs13.AddZero(1) })
-}
diff --git a/pkg/util/vector/vector_test.go b/pkg/util/vector/vector_test.go
index bff535de9736..a42784c8a05f 100644
--- a/pkg/util/vector/vector_test.go
+++ b/pkg/util/vector/vector_test.go
@@ -12,12 +12,8 @@ import (
"github.com/cockroachdb/cockroach/pkg/util/randutil"
"github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
)
-var NaN32 = float32(math.NaN())
-var Inf32 = float32(math.Inf(1))
-
func TestParseVector(t *testing.T) {
testCases := []struct {
input string
@@ -78,42 +74,84 @@ func TestRoundtripRandomPGVector(t *testing.T) {
}
}
-func TestCosDistance(t *testing.T) {
+func TestDistances(t *testing.T) {
// Test L1, L2, Cosine distance.
testCases := []struct {
- v1 []float32
- v2 []float32
+ v1 T
+ v2 T
+ l1 float64
+ l2 float64
cos float64
err bool
}{
- {v1: []float32{}, v2: []float32{}, cos: math.NaN(), err: false},
- {v1: []float32{1, 2, 3}, v2: []float32{4, 5, 6}, cos: 0.02536815, err: false},
- {v1: []float32{-1, -2, -3}, v2: []float32{-4, -5, -6}, cos: 0.02536815, err: false},
- {v1: []float32{1, 2, 3}, v2: []float32{1, 2, 3}, cos: 0, err: false},
- {v1: []float32{1, 2, 3}, v2: []float32{1, 2, 4}, cos: 0.008539, err: false},
- {v1: []float32{NaN32}, v2: []float32{1}, cos: math.NaN(), err: false},
- {v1: []float32{Inf32}, v2: []float32{1}, cos: math.NaN(), err: false},
+ {v1: T{1, 2, 3}, v2: T{4, 5, 6}, l1: 9, l2: 5.196152422, cos: 0.02536815, err: false},
+ {v1: T{-1, -2, -3}, v2: T{-4, -5, -6}, l1: 9, l2: 5.196152422, cos: 0.02536815, err: false},
+ {v1: T{0, 0, 0}, v2: T{0, 0, 0}, l1: 0, l2: 0, cos: math.NaN(), err: false},
+ {v1: T{1, 2, 3}, v2: T{1, 2, 3}, l1: 0, l2: 0, cos: 0, err: false},
+ {v1: T{1, 2, 3}, v2: T{1, 2, 4}, l1: 1, l2: 1, cos: 0.008539, err: false},
// Different vector sizes errors.
{v1: T{1, 2, 3}, v2: T{4, 5}, err: true},
}
for _, tc := range testCases {
+ l1, l1Err := L1Distance(tc.v1, tc.v2)
+ l2, l2Err := L2Distance(tc.v1, tc.v2)
cos, cosErr := CosDistance(tc.v1, tc.v2)
+
if tc.err {
+ assert.Error(t, l1Err)
+ assert.Error(t, l2Err)
assert.Error(t, cosErr)
} else {
+ assert.NoError(t, l1Err)
+ assert.NoError(t, l2Err)
assert.NoError(t, cosErr)
+ assert.InDelta(t, tc.l1, l1, 0.000001)
+ assert.InDelta(t, tc.l2, l2, 0.000001)
assert.InDelta(t, tc.cos, cos, 0.000001)
}
}
}
+func TestProducts(t *testing.T) {
+ // Test inner product and negative inner product.
+ testCases := []struct {
+ v1 T
+ v2 T
+ ip float64
+ negIp float64
+ err bool
+ }{
+ {v1: T{1, 2, 3}, v2: T{4, 5, 6}, ip: 32, negIp: -32, err: false},
+ {v1: T{-1, -2, -3}, v2: T{-4, -5, -6}, ip: 32, negIp: -32, err: false},
+ {v1: T{0, 0, 0}, v2: T{0, 0, 0}, ip: 0, negIp: 0, err: false},
+ {v1: T{1, 2, 3}, v2: T{1, 2, 3}, ip: 14, negIp: -14, err: false},
+ {v1: T{1, 2, 3}, v2: T{1, 2, 4}, ip: 17, negIp: -17, err: false},
+ // Different vector sizes errors.
+ {v1: T{1, 2, 3}, v2: T{4, 5}, err: true},
+ }
+
+ for _, tc := range testCases {
+ ip, ipErr := InnerProduct(tc.v1, tc.v2)
+ negIp, negIpErr := NegInnerProduct(tc.v1, tc.v2)
+
+ if tc.err {
+ assert.Error(t, ipErr)
+ assert.Error(t, negIpErr)
+ } else {
+ assert.NoError(t, ipErr)
+ assert.NoError(t, negIpErr)
+ assert.InDelta(t, tc.ip, ip, 0.000001)
+ assert.InDelta(t, tc.negIp, negIp, 0.000001)
+ }
+ }
+}
+
func TestNorm(t *testing.T) {
testCases := []struct {
v T
norm float64
}{
- {v: T{}, norm: 0},
{v: T{1, 2, 3}, norm: 3.7416573867739413},
{v: T{0, 0, 0}, norm: 0},
{v: T{-1, -2, -3}, norm: 3.7416573867739413},
@@ -125,34 +163,6 @@ func TestNorm(t *testing.T) {
}
}
-// While the real work of these functions is done by the num32 package, test
-// that the wrapper functions are working.
-func TestNum32Functions(t *testing.T) {
- _, err := L1Distance(T{1, 2}, T{3, 4, 5})
- require.Error(t, err)
- res, err := L1Distance(T{1, 2, 3}, T{4, 5, 6})
- require.NoError(t, err)
- require.Equal(t, float64(9), res)
-
- _, err = L2Distance(T{1, 2}, T{3, 4, 5})
- require.Error(t, err)
- res, err = L2Distance(T{1, 2, 3}, T{4, 5, 6})
- require.NoError(t, err)
- require.InDelta(t, float64(5.196152422), res, 0.000001)
-
- _, err = InnerProduct(T{1, 2}, T{3, 4, 5})
- require.Error(t, err)
- res, err = InnerProduct(T{1, 2, 3}, T{4, 5, 6})
- require.NoError(t, err)
- require.Equal(t, float64(32), res)
-
- _, err = NegInnerProduct(T{1, 2}, T{3, 4, 5})
- require.Error(t, err)
- res, err = NegInnerProduct(T{1, 2, 3}, T{4, 5, 6})
- require.NoError(t, err)
- require.Equal(t, float64(-32), res)
-}
-
func TestPointwiseOps(t *testing.T) {
// Test L1, L2, Cosine distance.
testCases := []struct {
diff --git a/pkg/workload/BUILD.bazel b/pkg/workload/BUILD.bazel
index 4869b22a3589..021e6b154368 100644
--- a/pkg/workload/BUILD.bazel
+++ b/pkg/workload/BUILD.bazel
@@ -5,7 +5,6 @@ go_library(
srcs = [
"connection.go",
"csv.go",
- "datadog.go",
"driver.go",
"pgx_helpers.go",
"random.go",
@@ -29,8 +28,6 @@ go_library(
"//pkg/util/timeutil",
"//pkg/workload/histogram",
"@com_github_cockroachdb_errors//:errors",
- "@com_github_datadog_datadog_api_client_go_v2//api/datadog",
- "@com_github_datadog_datadog_api_client_go_v2//api/datadogV1",
"@com_github_jackc_pgx_v5//:pgx",
"@com_github_jackc_pgx_v5//pgconn",
"@com_github_jackc_pgx_v5//pgxpool",
diff --git a/pkg/workload/cli/BUILD.bazel b/pkg/workload/cli/BUILD.bazel
index d2d93e215f77..d820a43e570d 100644
--- a/pkg/workload/cli/BUILD.bazel
+++ b/pkg/workload/cli/BUILD.bazel
@@ -28,7 +28,6 @@ go_library(
"//pkg/workload/histogram/exporter",
"//pkg/workload/workloadsql",
"@com_github_cockroachdb_errors//:errors",
- "@com_github_datadog_datadog_api_client_go_v2//api/datadogV1",
"@com_github_prometheus_client_golang//prometheus/collectors",
"@com_github_prometheus_client_golang//prometheus/promhttp",
"@com_github_spf13_cobra//:cobra",
diff --git a/pkg/workload/cli/check.go b/pkg/workload/cli/check.go
index 994ca27e39b4..9cf7b97746f6 100644
--- a/pkg/workload/cli/check.go
+++ b/pkg/workload/cli/check.go
@@ -8,24 +8,14 @@ package cli
import (
"context"
gosql "database/sql"
- "fmt"
"strings"
- "github.com/DataDog/datadog-api-client-go/v2/api/datadogV1"
"github.com/cockroachdb/cockroach/pkg/workload"
"github.com/cockroachdb/errors"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
-var checkFlags = pflag.NewFlagSet("check", pflag.ContinueOnError)
-var datadogSite = checkFlags.String("datadog-site", "us5.datadoghq.com",
- "Datadog site to communicate with (e.g., us5.datadoghq.com).")
-var datadogAPIKey = checkFlags.String("datadog-api-key", "",
- "Datadog API key to emit telemetry data to Datadog.")
-var datadogTags = checkFlags.String("datadog-tags", "",
- "A comma-separated list of tags to attach to telemetry data (e.g., key1:val1,key2:val2).")
-
func init() {
AddSubCmd(func(userFacing bool) *cobra.Command {
var checkCmd = SetCmdDefaults(&cobra.Command{
@@ -57,7 +47,6 @@ func init() {
Args: cobra.RangeArgs(0, 1),
})
genCheckCmd.Flags().AddFlagSet(genFlags)
- genCheckCmd.Flags().AddFlagSet(checkFlags)
genCheckCmd.Run = CmdHelper(gen, check)
checkCmd.AddCommand(genCheckCmd)
}
@@ -84,15 +73,5 @@ func check(gen workload.Generator, urls []string, dbName string) error {
if err := sqlDB.Ping(); err != nil {
return err
}
- err = fn(ctx, sqlDB)
- if err != nil {
- // For automated operations running the consistency checker like the DRT team,
- // there is a need to send an event to Datadog so that a Slack alert can be
- // configured. Here, we are attempting to emit an error event to Datadog.
- datadogContext := workload.NewDatadogContext(ctx, *datadogSite, *datadogAPIKey)
- title := fmt.Sprintf("Consistency check failed for %s", gen.Meta().Name)
- text := fmt.Sprintf("%v", err)
- workload.EmitDatadogEvent(datadogContext, title, text, datadogV1.EVENTALERTTYPE_ERROR, *datadogTags)
- }
- return err
+ return fn(ctx, sqlDB)
}
diff --git a/pkg/workload/datadog.go b/pkg/workload/datadog.go
deleted file mode 100644
index 01518fb6e977..000000000000
--- a/pkg/workload/datadog.go
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright 2018 The Cockroach Authors.
-//
-// Use of this software is governed by the CockroachDB Software License
-// included in the /LICENSE file.
-
-package workload
-
-import (
- "context"
- "os"
- "strings"
-
- "github.com/DataDog/datadog-api-client-go/v2/api/datadog"
- "github.com/DataDog/datadog-api-client-go/v2/api/datadogV1"
- "github.com/cockroachdb/cockroach/pkg/util/timeutil"
-)
-
-var eventsClient = datadogV1.NewEventsApi(datadog.NewAPIClient(datadog.NewConfiguration()))
-
-// NewDatadogContext adds values to the passed in ctx to configure it to
-// communicate with Datadog. If value of site or apiKey is not provided ctx
-// is returned without any changes.
-func NewDatadogContext(ctx context.Context, site, apiKey string) context.Context {
- if ctx == nil {
- ctx = context.Background()
- }
-
- if site == "" || apiKey == "" {
- return ctx
- }
-
- ctx = context.WithValue(ctx, datadog.ContextAPIKeys, map[string]datadog.APIKey{
- "apiKeyAuth": {
- Key: apiKey,
- },
- })
-
- ctx = context.WithValue(ctx, datadog.ContextServerVariables, map[string]string{
- "site": site,
- })
- return ctx
-}
-
-// EmitDatadogEvent sends an event to Datadog if the passed in ctx has the necessary values to
-// communicate with Datadog.
-func EmitDatadogEvent(
- ctx context.Context, title, text string, eventType datadogV1.EventAlertType, tags string,
-) {
- _, hasAPIKey := ctx.Value(datadog.ContextAPIKeys).(map[string]datadog.APIKey)
- _, hasServerVariables := ctx.Value(datadog.ContextServerVariables).(map[string]string)
- if !hasAPIKey && !hasServerVariables {
- return
- }
-
- hostName, _ := os.Hostname()
- _, _, _ = eventsClient.CreateEvent(ctx, datadogV1.EventCreateRequest{
- AlertType: &eventType,
- DateHappened: datadog.PtrInt64(timeutil.Now().Unix()),
- Host: &hostName,
- SourceTypeName: datadog.PtrString("workload"),
- Tags: getDatadogTags(tags),
- Text: text,
- Title: title,
- })
-}
-
-func getDatadogTags(tags string) []string {
- return strings.Split(tags, ",")
-}
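
The emitter deleted above gated itself on configuration carried by the context: if no API key and site had been attached, it silently did nothing. A generic sketch of that gate (hypothetical key type; this is not the datadog client API):

package main

import (
	"context"
	"fmt"
)

type apiKeyCtxKey struct{}

func withAPIKey(ctx context.Context, key string) context.Context {
	if key == "" {
		return ctx // leave unconfigured; emission stays disabled
	}
	return context.WithValue(ctx, apiKeyCtxKey{}, key)
}

func emit(ctx context.Context, msg string) {
	if _, ok := ctx.Value(apiKeyCtxKey{}).(string); !ok {
		return // not configured: skip the network call entirely
	}
	fmt.Println("would send:", msg)
}

func main() {
	emit(context.Background(), "dropped")                         // no-op
	emit(withAPIKey(context.Background(), "secret"), "delivered") // would send
}
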
diff --git a/pkg/workload/schemachange/operation_generator.go b/pkg/workload/schemachange/operation_generator.go
index 80c049b9e65a..66fbb25a4d07 100644
--- a/pkg/workload/schemachange/operation_generator.go
+++ b/pkg/workload/schemachange/operation_generator.go
@@ -3025,18 +3025,17 @@ const (
type opStmtQueryResultCallback func(ctx context.Context, rows pgx.Rows) error
-// opStmt encapsulates a generated SQL statement, its type (DDL or DML),
-// expected and potential execution errors, and a callback for handling query results.
+// opStmt is a generated statement, either DDL or DML, together with the
+// potential set of execution errors it can generate.
type opStmt struct {
// sql the query being executed.
sql string
- // queryType indicates whether the type being executed is DDL or DML.
+ // queryType is the family of the query being executed (DDL or DML).
queryType opStmtType
// expectedExecErrors expected set of execution errors.
expectedExecErrors errorCodeSet
// potentialExecErrors errors that could be potentially seen on execution.
potentialExecErrors errorCodeSet
- // queryResultCallback handles the results of the query execution.
queryResultCallback opStmtQueryResultCallback
}
diff --git a/pkg/workload/schemachange/schemachange.go b/pkg/workload/schemachange/schemachange.go
index fb9c5ace269c..8601f004fdbf 100644
--- a/pkg/workload/schemachange/schemachange.go
+++ b/pkg/workload/schemachange/schemachange.go
@@ -401,23 +401,15 @@ var (
errRunInTxnRbkSentinel = errors.New("txn needs to rollback")
)
-// LogEntry is used to log information about the operations performed, expected errors,
-// the worker ID, the corresponding timestamp, and any additional messages or error states.
-// Note: LogEntry and its fields must be public so that the json package can encode this struct.
+// LogEntry and its fields must be public so that the json package can encode this struct.
type LogEntry struct {
- // WorkerID identifies the worker executing the operations.
- WorkerID int `json:"workerId"`
- // ClientTimestamp tracks when the operation was executed.
- ClientTimestamp string `json:"clientTimestamp"`
- // Ops a collection of the various types of operations performed.
- Ops []interface{} `json:"ops"`
- // ExpectedExecErrors errors which occur as soon as you run the statement.
- ExpectedExecErrors string `json:"expectedExecErrors"`
- // ExpectedCommitErrors errors which occur only during commit.
- ExpectedCommitErrors string `json:"expectedCommitErrors"`
+ WorkerID int `json:"workerId"`
+ ClientTimestamp string `json:"clientTimestamp"`
+ Ops []interface{} `json:"ops"`
+ ExpectedExecErrors string `json:"expectedExecErrors"`
+ ExpectedCommitErrors string `json:"expectedCommitErrors"`
// Optional message for errors or if a hook was called.
- Message string `json:"message"`
- // ErrorState holds information on the error's state when an error occurs.
+ Message string `json:"message"`
ErrorState *ErrorState `json:"errorState,omitempty"`
}
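
Since the surviving comment only notes that the fields must be public for JSON encoding, a quick standalone illustration (trimmed copy of the struct, for demonstration only):

package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed copy of LogEntry above; encoding/json only serializes exported
// struct fields, which is why the real struct keeps them public.
type LogEntry struct {
	WorkerID        int           `json:"workerId"`
	ClientTimestamp string        `json:"clientTimestamp"`
	Ops             []interface{} `json:"ops"`
}

func main() {
	b, err := json.Marshal(LogEntry{
		WorkerID:        3,
		ClientTimestamp: "2024-10-28T12:00:00Z",
		Ops:             []interface{}{"ALTER TABLE t ADD COLUMN c INT"},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
	// {"workerId":3,"clientTimestamp":"2024-10-28T12:00:00Z","ops":["ALTER TABLE t ADD COLUMN c INT"]}
}
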
diff --git a/scripts/bump-pebble.sh b/scripts/bump-pebble.sh
index d8966b267a5e..83225141d2c9 100755
--- a/scripts/bump-pebble.sh
+++ b/scripts/bump-pebble.sh
@@ -11,8 +11,8 @@
# branch name (e.g. crl-release-23.2, etc.). Also update pebble nightly scripts
# in build/teamcity/cockroach/nightlies to use `@crl-release-xy.z` instead of
# `@master`.
-BRANCH=master
-PEBBLE_BRANCH=master
+BRANCH=release-24.3
+PEBBLE_BRANCH=crl-release-24.3
# This script may be used to produce a branch bumping the Pebble version. The
# storage team bumps CockroachDB's Pebble dependency frequently, and this script