From 9847968a3fce1e3eb121f76f69be55add352779c Mon Sep 17 00:00:00 2001
From: Ricky Stewart
Date: Fri, 3 Jun 2022 12:36:52 -0500
Subject: [PATCH 1/9] dev: various improvements to `dev generate cgo` and
 friends

1. Up until this point, `dev generate go` has not included the
   `zcgo_flags.go` sources, which has been a point of confusion for
   people who expect `dev generate go` to generate all the .go files.
   Now `dev generate go` includes `cgo` as well, and there is a new
   target `dev generate go_nocgo` that does what `dev generate go` used
   to do.
2. Now `dev generate cgo` is conscious of whether `force_build_cdeps`
   is set. If it is, then we make sure the generated files don't point
   to one of the pre-archived locations. To this end we add a
   `test_force_build_cdeps` target that `dev generate cgo` builds.

Release note: None
---
 build/bazelutil/BUILD.bazel              | 10 +++
 build/toolchains/BUILD.bazel             |  5 +-
 pkg/cmd/dev/generate.go                  | 94 +++++++++++++++---------
 pkg/cmd/dev/testdata/datadriven/generate |  4 +-
 pkg/gen/genbzl/targets.go                |  1 +
 5 files changed, 78 insertions(+), 36 deletions(-)

diff --git a/build/bazelutil/BUILD.bazel b/build/bazelutil/BUILD.bazel
index 926a064dbc6a..56da28150240 100644
--- a/build/bazelutil/BUILD.bazel
+++ b/build/bazelutil/BUILD.bazel
@@ -22,6 +22,16 @@ analysis_test(
     ),
 )
 
+# The output file will be empty unless we're using the force_build_cdeps config.
+genrule(
+    name = "test_force_build_cdeps",
+    outs = ["test_force_build_cdeps.txt"],
+    cmd = select({
+        "//build/toolchains:force_build_cdeps": "echo 1 > $@",
+        "//conditions:default": "touch $@",
+    }),
+)
+
 lint_binary(
     name = "lint",
     test = "//pkg/testutils/lint:lint_test",
diff --git a/build/toolchains/BUILD.bazel b/build/toolchains/BUILD.bazel
index a3603fc23f78..625b50a13007 100644
--- a/build/toolchains/BUILD.bazel
+++ b/build/toolchains/BUILD.bazel
@@ -463,7 +463,10 @@ config_setting(
     flag_values = {
         ":force_build_cdeps_flag": "true",
     },
-    visibility = ["//c-deps:__pkg__"],
+    visibility = [
+        "//build/bazelutil:__pkg__",
+        "//c-deps:__pkg__",
+    ],
 )
 
 bool_flag(
diff --git a/pkg/cmd/dev/generate.go b/pkg/cmd/dev/generate.go
index 5e05ae46f672..82b7df6c19fc 100644
--- a/pkg/cmd/dev/generate.go
+++ b/pkg/cmd/dev/generate.go
@@ -37,7 +37,8 @@ func makeGenerateCmd(runE func(cmd *cobra.Command, args []string) error) *cobra.
 	dev generate bazel      # DEPS.bzl and BUILD.bazel files
 	dev generate cgo        # files that help non-Bazel systems (IDEs, go) link to our C dependencies
 	dev generate docs       # generates documentation
-	dev generate go         # generates go code (execgen, stringer, protobufs, etc.)
+	dev generate go         # generates go code (execgen, stringer, protobufs, etc.), plus everything 'cgo' generates
+	dev generate go_nocgo   # generates go code (execgen, stringer, protobufs, etc.)
 	dev generate protobuf   # *.pb.go files (subset of 'dev generate go')
 `,
 		Args: cobra.MinimumNArgs(0),
@@ -64,48 +65,45 @@ var archivedCdepConfigurations = []configuration{
 	{"windows", "amd64"},
 }
 
-// archivedCdepConfig returns the cross config string associated with the
-// current machine configuration (e.g. "macosarm").
-func archivedCdepConfig() string { - for _, config := range archivedCdepConfigurations { - if config.Os == runtime.GOOS && config.Arch == runtime.GOARCH { - ret := config.Os - if ret == "darwin" { - ret = "macos" - } - if config.Arch == "arm64" { - ret += "arm" - } - return ret - } - } - return "" -} - func (d *dev) generate(cmd *cobra.Command, targets []string) error { var generatorTargetMapping = map[string]func(cmd *cobra.Command) error{ "bazel": d.generateBazel, "cgo": d.generateCgo, "docs": d.generateDocs, "go": d.generateGo, + "go_nocgo": d.generateGoNoCgo, "protobuf": d.generateProtobuf, } if len(targets) == 0 { - targets = append(targets, "bazel", "go", "docs", "cgo") + targets = append(targets, "bazel", "go_nocgo", "docs", "cgo") } targetsMap := make(map[string]struct{}) for _, target := range targets { targetsMap[target] = struct{}{} } - _, includesGo := targetsMap["go"] - _, includesDocs := targetsMap["docs"] - if includesGo && includesDocs { - delete(targetsMap, "go") - delete(targetsMap, "docs") - if err := d.generateGoAndDocs(cmd); err != nil { - return err + { + // In this case, generating both go and cgo would duplicate work. + // Generate go_nocgo instead. + _, includesGo := targetsMap["go"] + _, includesCgo := targetsMap["cgo"] + if includesGo && includesCgo { + delete(targetsMap, "go") + targetsMap["go_nocgo"] = struct{}{} + } + } + { + // generateGoAndDocs is a faster way to generate both (non-cgo) + // go code as well as the docs + _, includesGonocgo := targetsMap["go_nocgo"] + _, includesDocs := targetsMap["docs"] + if includesGonocgo && includesDocs { + delete(targetsMap, "go_nocgo") + delete(targetsMap, "docs") + if err := d.generateGoAndDocs(cmd); err != nil { + return err + } } } @@ -162,6 +160,13 @@ func (d *dev) generateGoAndDocs(cmd *cobra.Command) error { } func (d *dev) generateGo(cmd *cobra.Command) error { + if err := d.generateGoNoCgo(cmd); err != nil { + return err + } + return d.generateCgo(cmd) +} + +func (d *dev) generateGoNoCgo(cmd *cobra.Command) error { return d.generateTarget(cmd.Context(), "//pkg/gen:code") } @@ -199,7 +204,7 @@ func (d *dev) generateRedactSafe(ctx context.Context) error { func (d *dev) generateCgo(cmd *cobra.Command) error { ctx := cmd.Context() - args := []string{"build", "//c-deps:libjemalloc", "//c-deps:libproj"} + args := []string{"build", "//build/bazelutil:test_force_build_cdeps", "//c-deps:libjemalloc", "//c-deps:libproj"} if runtime.GOOS == "linux" { args = append(args, "//c-deps:libkrb5") } @@ -211,6 +216,11 @@ func (d *dev) generateCgo(cmd *cobra.Command) error { if err != nil { return err } + bazelBin, err := d.getBazelBin(ctx) + if err != nil { + return err + } + const cgoTmpl = `// GENERATED FILE DO NOT EDIT package {{ .Package }} @@ -221,8 +231,30 @@ import "C" ` tpl := template.Must(template.New("source").Parse(cgoTmpl)) + var archived string + // If force_build_cdeps is set then the prebuilt libraries won't be in + // the archived location anyway. + forceBuildCdeps, err := d.os.ReadFile(filepath.Join(bazelBin, "build", "bazelutil", "test_force_build_cdeps.txt")) + if err != nil { + return err + } + // force_build_cdeps is activated if the length of this file is not 0. + if len(forceBuildCdeps) == 0 { + for _, config := range archivedCdepConfigurations { + if config.Os == runtime.GOOS && config.Arch == runtime.GOARCH { + archived = config.Os + if archived == "darwin" { + archived = "macos" + } + if config.Arch == "arm64" { + archived += "arm" + } + } + } + } + + // Figure out where to find the c-deps libraries. 
var jemallocDir, projDir, krbDir string - archived := archivedCdepConfig() if archived != "" { execRoot, err := d.getExecutionRoot(ctx) if err != nil { @@ -234,10 +266,6 @@ import "C" krbDir = filepath.Join(execRoot, "external", fmt.Sprintf("archived_cdep_libkrb5_%s", archived)) } } else { - bazelBin, err := d.getBazelBin(ctx) - if err != nil { - return err - } jemallocDir = filepath.Join(bazelBin, "c-deps/libjemalloc_foreign") projDir = filepath.Join(bazelBin, "c-deps/libproj_foreign") if runtime.GOOS == "linux" { diff --git a/pkg/cmd/dev/testdata/datadriven/generate b/pkg/cmd/dev/testdata/datadriven/generate index 2f5b65e94df5..063ae61607f3 100644 --- a/pkg/cmd/dev/testdata/datadriven/generate +++ b/pkg/cmd/dev/testdata/datadriven/generate @@ -18,7 +18,7 @@ export COCKROACH_BAZEL_FORCE_GENERATE=1 crdb-checkout/build/bazelutil/bazel-generate.sh exec -dev generate go +dev generate go_nocgo ---- bazel run //pkg/gen:code @@ -31,7 +31,7 @@ crdb-checkout/build/bazelutil/generate_redact_safe.sh echo "" > crdb-checkout/docs/generated/redact_safe.md exec -dev gen go docs +dev gen go_nocgo docs ---- bazel run //pkg/gen bazel info workspace --color=no diff --git a/pkg/gen/genbzl/targets.go b/pkg/gen/genbzl/targets.go index 9b9425c64b38..333be17f5797 100644 --- a/pkg/gen/genbzl/targets.go +++ b/pkg/gen/genbzl/targets.go @@ -66,6 +66,7 @@ let all = kind("generated file", {{ .All }}) in ($all ^ labels("out", kind("_gomock_prog_gen rule", {{ .All }}))) + filter(".*:.*(-gen|gen-).*", $all) + //pkg/testutils/lint/passes/errcheck:errcheck_excludes.txt + + //build/bazelutil:test_force_build_cdeps.txt + //build/bazelutil:test_stamping.txt + labels("outs", //docs/generated/sql/bnf:svg) `, From 723818e63b7e7d684a376150cc77fcb810594143 Mon Sep 17 00:00:00 2001 From: e-mbrown Date: Tue, 7 Jun 2022 12:34:05 -0400 Subject: [PATCH 2/9] cloud: bump orchestrator to v22.1.1 Release note: None --- cloud/kubernetes/bring-your-own-certs/client.yaml | 2 +- .../bring-your-own-certs/cockroachdb-statefulset.yaml | 2 +- cloud/kubernetes/client-secure.yaml | 2 +- cloud/kubernetes/cluster-init-secure.yaml | 2 +- cloud/kubernetes/cluster-init.yaml | 2 +- cloud/kubernetes/cockroachdb-statefulset-secure.yaml | 2 +- cloud/kubernetes/cockroachdb-statefulset.yaml | 2 +- cloud/kubernetes/multiregion/client-secure.yaml | 2 +- cloud/kubernetes/multiregion/cluster-init-secure.yaml | 2 +- .../kubernetes/multiregion/cockroachdb-statefulset-secure.yaml | 2 +- .../multiregion/eks/cockroachdb-statefulset-secure-eks.yaml | 2 +- .../kubernetes/performance/cockroachdb-daemonset-insecure.yaml | 2 +- cloud/kubernetes/performance/cockroachdb-daemonset-secure.yaml | 2 +- .../performance/cockroachdb-statefulset-insecure.yaml | 2 +- .../kubernetes/performance/cockroachdb-statefulset-secure.yaml | 2 +- cloud/kubernetes/v1.6/client-secure.yaml | 2 +- cloud/kubernetes/v1.6/cluster-init-secure.yaml | 2 +- cloud/kubernetes/v1.6/cluster-init.yaml | 2 +- cloud/kubernetes/v1.6/cockroachdb-statefulset-secure.yaml | 2 +- cloud/kubernetes/v1.6/cockroachdb-statefulset.yaml | 2 +- cloud/kubernetes/v1.7/client-secure.yaml | 2 +- cloud/kubernetes/v1.7/cluster-init-secure.yaml | 2 +- cloud/kubernetes/v1.7/cluster-init.yaml | 2 +- cloud/kubernetes/v1.7/cockroachdb-statefulset-secure.yaml | 2 +- cloud/kubernetes/v1.7/cockroachdb-statefulset.yaml | 2 +- 25 files changed, 25 insertions(+), 25 deletions(-) diff --git a/cloud/kubernetes/bring-your-own-certs/client.yaml b/cloud/kubernetes/bring-your-own-certs/client.yaml index 209fe4111edd..f28d83c85659 100644 --- 
a/cloud/kubernetes/bring-your-own-certs/client.yaml +++ b/cloud/kubernetes/bring-your-own-certs/client.yaml @@ -20,7 +20,7 @@ spec: serviceAccountName: cockroachdb containers: - name: cockroachdb-client - image: cockroachdb/cockroach:v22.1.0 + image: cockroachdb/cockroach:v22.1.1 # Keep a pod open indefinitely so kubectl exec can be used to get a shell to it # and run cockroach client commands, such as cockroach sql, cockroach node status, etc. command: diff --git a/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml b/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml index 700ca3b21c85..afad18aead65 100644 --- a/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml +++ b/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml @@ -153,7 +153,7 @@ spec: topologyKey: kubernetes.io/hostname containers: - name: cockroachdb - image: cockroachdb/cockroach:v22.1.0 + image: cockroachdb/cockroach:v22.1.1 imagePullPolicy: IfNotPresent # TODO: Change these to appropriate values for the hardware that you're running. You can see # the resources that can be allocated on each of your Kubernetes nodes by running: diff --git a/cloud/kubernetes/client-secure.yaml b/cloud/kubernetes/client-secure.yaml index 6713c56c7346..2ce7fab81eda 100644 --- a/cloud/kubernetes/client-secure.yaml +++ b/cloud/kubernetes/client-secure.yaml @@ -32,7 +32,7 @@ spec: mountPath: /cockroach-certs containers: - name: cockroachdb-client - image: cockroachdb/cockroach:v22.1.0 + image: cockroachdb/cockroach:v22.1.1 imagePullPolicy: IfNotPresent volumeMounts: - name: client-certs diff --git a/cloud/kubernetes/cluster-init-secure.yaml b/cloud/kubernetes/cluster-init-secure.yaml index a3ccd439a267..68552bbc3696 100644 --- a/cloud/kubernetes/cluster-init-secure.yaml +++ b/cloud/kubernetes/cluster-init-secure.yaml @@ -34,7 +34,7 @@ spec: mountPath: /cockroach-certs containers: - name: cluster-init - image: cockroachdb/cockroach:v22.1.0 + image: cockroachdb/cockroach:v22.1.1 imagePullPolicy: IfNotPresent volumeMounts: - name: client-certs diff --git a/cloud/kubernetes/cluster-init.yaml b/cloud/kubernetes/cluster-init.yaml index 056bf4ad5533..29e39b4848a1 100644 --- a/cloud/kubernetes/cluster-init.yaml +++ b/cloud/kubernetes/cluster-init.yaml @@ -10,7 +10,7 @@ spec: spec: containers: - name: cluster-init - image: cockroachdb/cockroach:v22.1.0 + image: cockroachdb/cockroach:v22.1.1 imagePullPolicy: IfNotPresent command: - "/cockroach/cockroach" diff --git a/cloud/kubernetes/cockroachdb-statefulset-secure.yaml b/cloud/kubernetes/cockroachdb-statefulset-secure.yaml index 740f03501889..4af4716c3b98 100644 --- a/cloud/kubernetes/cockroachdb-statefulset-secure.yaml +++ b/cloud/kubernetes/cockroachdb-statefulset-secure.yaml @@ -195,7 +195,7 @@ spec: topologyKey: kubernetes.io/hostname containers: - name: cockroachdb - image: cockroachdb/cockroach:v22.1.0 + image: cockroachdb/cockroach:v22.1.1 imagePullPolicy: IfNotPresent # TODO: Change these to appropriate values for the hardware that you're running. 
You can see # the resources that can be allocated on each of your Kubernetes nodes by running: diff --git a/cloud/kubernetes/cockroachdb-statefulset.yaml b/cloud/kubernetes/cockroachdb-statefulset.yaml index b49094ff285f..65b3f9c78df1 100644 --- a/cloud/kubernetes/cockroachdb-statefulset.yaml +++ b/cloud/kubernetes/cockroachdb-statefulset.yaml @@ -98,7 +98,7 @@ spec: topologyKey: kubernetes.io/hostname containers: - name: cockroachdb - image: cockroachdb/cockroach:v22.1.0 + image: cockroachdb/cockroach:v22.1.1 imagePullPolicy: IfNotPresent # TODO: Change these to appropriate values for the hardware that you're running. You can see # the resources that can be allocated on each of your Kubernetes nodes by running: diff --git a/cloud/kubernetes/multiregion/client-secure.yaml b/cloud/kubernetes/multiregion/client-secure.yaml index 8f03a337086a..d2ee434f845f 100644 --- a/cloud/kubernetes/multiregion/client-secure.yaml +++ b/cloud/kubernetes/multiregion/client-secure.yaml @@ -9,7 +9,7 @@ spec: serviceAccountName: cockroachdb containers: - name: cockroachdb-client - image: cockroachdb/cockroach:v22.1.0 + image: cockroachdb/cockroach:v22.1.1 imagePullPolicy: IfNotPresent volumeMounts: - name: client-certs diff --git a/cloud/kubernetes/multiregion/cluster-init-secure.yaml b/cloud/kubernetes/multiregion/cluster-init-secure.yaml index a15a68bff25a..aaec59419bf1 100644 --- a/cloud/kubernetes/multiregion/cluster-init-secure.yaml +++ b/cloud/kubernetes/multiregion/cluster-init-secure.yaml @@ -11,7 +11,7 @@ spec: serviceAccountName: cockroachdb containers: - name: cluster-init - image: cockroachdb/cockroach:v22.1.0 + image: cockroachdb/cockroach:v22.1.1 imagePullPolicy: IfNotPresent volumeMounts: - name: client-certs diff --git a/cloud/kubernetes/multiregion/cockroachdb-statefulset-secure.yaml b/cloud/kubernetes/multiregion/cockroachdb-statefulset-secure.yaml index 491aabd259d3..2fbe90580317 100644 --- a/cloud/kubernetes/multiregion/cockroachdb-statefulset-secure.yaml +++ b/cloud/kubernetes/multiregion/cockroachdb-statefulset-secure.yaml @@ -167,7 +167,7 @@ spec: topologyKey: kubernetes.io/hostname containers: - name: cockroachdb - image: cockroachdb/cockroach:v22.1.0 + image: cockroachdb/cockroach:v22.1.1 imagePullPolicy: IfNotPresent ports: - containerPort: 26257 diff --git a/cloud/kubernetes/multiregion/eks/cockroachdb-statefulset-secure-eks.yaml b/cloud/kubernetes/multiregion/eks/cockroachdb-statefulset-secure-eks.yaml index 7d1304417273..df7a3f041d75 100644 --- a/cloud/kubernetes/multiregion/eks/cockroachdb-statefulset-secure-eks.yaml +++ b/cloud/kubernetes/multiregion/eks/cockroachdb-statefulset-secure-eks.yaml @@ -185,7 +185,7 @@ spec: name: cockroach-env containers: - name: cockroachdb - image: cockroachdb/cockroach:v22.1.0 + image: cockroachdb/cockroach:v22.1.1 imagePullPolicy: IfNotPresent # TODO: Change these to appropriate values for the hardware that you're running. 
You can see # the resources that can be allocated on each of your Kubernetes nodes by running: diff --git a/cloud/kubernetes/performance/cockroachdb-daemonset-insecure.yaml b/cloud/kubernetes/performance/cockroachdb-daemonset-insecure.yaml index d54af6d4da99..c71caf670c6b 100644 --- a/cloud/kubernetes/performance/cockroachdb-daemonset-insecure.yaml +++ b/cloud/kubernetes/performance/cockroachdb-daemonset-insecure.yaml @@ -82,7 +82,7 @@ spec: hostNetwork: true containers: - name: cockroachdb - image: cockroachdb/cockroach:v22.1.0 + image: cockroachdb/cockroach:v22.1.1 imagePullPolicy: IfNotPresent # TODO: If you configured taints to give CockroachDB exclusive access to nodes, feel free # to remove the requests and limits sections. If you didn't, you'll need to change these to diff --git a/cloud/kubernetes/performance/cockroachdb-daemonset-secure.yaml b/cloud/kubernetes/performance/cockroachdb-daemonset-secure.yaml index a937591a30f4..16e6b224972e 100644 --- a/cloud/kubernetes/performance/cockroachdb-daemonset-secure.yaml +++ b/cloud/kubernetes/performance/cockroachdb-daemonset-secure.yaml @@ -198,7 +198,7 @@ spec: topologyKey: kubernetes.io/hostname containers: - name: cockroachdb - image: cockroachdb/cockroach:v22.1.0 + image: cockroachdb/cockroach:v22.1.1 imagePullPolicy: IfNotPresent # TODO: If you configured taints to give CockroachDB exclusive access to nodes, feel free # to remove the requests and limits sections. If you didn't, you'll need to change these to diff --git a/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml b/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml index 867b189667d1..b976d7dcbc74 100644 --- a/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml +++ b/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml @@ -141,7 +141,7 @@ spec: - name: cockroachdb # NOTE: Always use the most recent version of CockroachDB for the best # performance and reliability. - image: cockroachdb/cockroach:v22.1.0 + image: cockroachdb/cockroach:v22.1.1 imagePullPolicy: IfNotPresent # TODO: Change these to appropriate values for the hardware that you're running. You can see # the resources that can be allocated on each of your Kubernetes nodes by running: diff --git a/cloud/kubernetes/performance/cockroachdb-statefulset-secure.yaml b/cloud/kubernetes/performance/cockroachdb-statefulset-secure.yaml index 7cb732f50a19..8887ddd1399f 100644 --- a/cloud/kubernetes/performance/cockroachdb-statefulset-secure.yaml +++ b/cloud/kubernetes/performance/cockroachdb-statefulset-secure.yaml @@ -232,7 +232,7 @@ spec: - name: cockroachdb # NOTE: Always use the most recent version of CockroachDB for the best # performance and reliability. - image: cockroachdb/cockroach:v22.1.0 + image: cockroachdb/cockroach:v22.1.1 imagePullPolicy: IfNotPresent # TODO: Change these to appropriate values for the hardware that you're running. 
You can see # the resources that can be allocated on each of your Kubernetes nodes by running: diff --git a/cloud/kubernetes/v1.6/client-secure.yaml b/cloud/kubernetes/v1.6/client-secure.yaml index 5b681021f7f7..00454d46ba8f 100644 --- a/cloud/kubernetes/v1.6/client-secure.yaml +++ b/cloud/kubernetes/v1.6/client-secure.yaml @@ -32,7 +32,7 @@ spec: mountPath: /cockroach-certs containers: - name: cockroachdb-client - image: cockroachdb/cockroach:v22.1.0 + image: cockroachdb/cockroach:v22.1.1 imagePullPolicy: IfNotPresent volumeMounts: - name: client-certs diff --git a/cloud/kubernetes/v1.6/cluster-init-secure.yaml b/cloud/kubernetes/v1.6/cluster-init-secure.yaml index 59e94ffea778..a6f6a1bc52d2 100644 --- a/cloud/kubernetes/v1.6/cluster-init-secure.yaml +++ b/cloud/kubernetes/v1.6/cluster-init-secure.yaml @@ -34,7 +34,7 @@ spec: mountPath: /cockroach-certs containers: - name: cluster-init - image: cockroachdb/cockroach:v22.1.0 + image: cockroachdb/cockroach:v22.1.1 imagePullPolicy: IfNotPresent volumeMounts: - name: client-certs diff --git a/cloud/kubernetes/v1.6/cluster-init.yaml b/cloud/kubernetes/v1.6/cluster-init.yaml index d4529c0e04ed..dea43af13805 100644 --- a/cloud/kubernetes/v1.6/cluster-init.yaml +++ b/cloud/kubernetes/v1.6/cluster-init.yaml @@ -10,7 +10,7 @@ spec: spec: containers: - name: cluster-init - image: cockroachdb/cockroach:v22.1.0 + image: cockroachdb/cockroach:v22.1.1 imagePullPolicy: IfNotPresent command: - "/cockroach/cockroach" diff --git a/cloud/kubernetes/v1.6/cockroachdb-statefulset-secure.yaml b/cloud/kubernetes/v1.6/cockroachdb-statefulset-secure.yaml index ffa8601615ac..5ed2ca9a2a7e 100644 --- a/cloud/kubernetes/v1.6/cockroachdb-statefulset-secure.yaml +++ b/cloud/kubernetes/v1.6/cockroachdb-statefulset-secure.yaml @@ -178,7 +178,7 @@ spec: topologyKey: kubernetes.io/hostname containers: - name: cockroachdb - image: cockroachdb/cockroach:v22.1.0 + image: cockroachdb/cockroach:v22.1.1 imagePullPolicy: IfNotPresent ports: - containerPort: 26257 diff --git a/cloud/kubernetes/v1.6/cockroachdb-statefulset.yaml b/cloud/kubernetes/v1.6/cockroachdb-statefulset.yaml index da605cbf985e..2cbe3b293cbe 100644 --- a/cloud/kubernetes/v1.6/cockroachdb-statefulset.yaml +++ b/cloud/kubernetes/v1.6/cockroachdb-statefulset.yaml @@ -81,7 +81,7 @@ spec: topologyKey: kubernetes.io/hostname containers: - name: cockroachdb - image: cockroachdb/cockroach:v22.1.0 + image: cockroachdb/cockroach:v22.1.1 imagePullPolicy: IfNotPresent ports: - containerPort: 26257 diff --git a/cloud/kubernetes/v1.7/client-secure.yaml b/cloud/kubernetes/v1.7/client-secure.yaml index 86ce7935c95c..93b3bdc5f837 100644 --- a/cloud/kubernetes/v1.7/client-secure.yaml +++ b/cloud/kubernetes/v1.7/client-secure.yaml @@ -32,7 +32,7 @@ spec: mountPath: /cockroach-certs containers: - name: cockroachdb-client - image: cockroachdb/cockroach:v22.1.0 + image: cockroachdb/cockroach:v22.1.1 imagePullPolicy: IfNotPresent volumeMounts: - name: client-certs diff --git a/cloud/kubernetes/v1.7/cluster-init-secure.yaml b/cloud/kubernetes/v1.7/cluster-init-secure.yaml index 119174107db5..df82428b5675 100644 --- a/cloud/kubernetes/v1.7/cluster-init-secure.yaml +++ b/cloud/kubernetes/v1.7/cluster-init-secure.yaml @@ -34,7 +34,7 @@ spec: mountPath: /cockroach-certs containers: - name: cluster-init - image: cockroachdb/cockroach:v22.1.0 + image: cockroachdb/cockroach:v22.1.1 imagePullPolicy: IfNotPresent volumeMounts: - name: client-certs diff --git a/cloud/kubernetes/v1.7/cluster-init.yaml b/cloud/kubernetes/v1.7/cluster-init.yaml 
index 67da5c3aedfa..00aa97a60764 100644
--- a/cloud/kubernetes/v1.7/cluster-init.yaml
+++ b/cloud/kubernetes/v1.7/cluster-init.yaml
@@ -10,7 +10,7 @@ spec:
     spec:
       containers:
       - name: cluster-init
-        image: cockroachdb/cockroach:v22.1.0
+        image: cockroachdb/cockroach:v22.1.1
         imagePullPolicy: IfNotPresent
         command:
           - "/cockroach/cockroach"
diff --git a/cloud/kubernetes/v1.7/cockroachdb-statefulset-secure.yaml b/cloud/kubernetes/v1.7/cockroachdb-statefulset-secure.yaml
index 315bbd131f0f..c8b53b36eb49 100644
--- a/cloud/kubernetes/v1.7/cockroachdb-statefulset-secure.yaml
+++ b/cloud/kubernetes/v1.7/cockroachdb-statefulset-secure.yaml
@@ -190,7 +190,7 @@ spec:
             topologyKey: kubernetes.io/hostname
       containers:
      - name: cockroachdb
-        image: cockroachdb/cockroach:v22.1.0
+        image: cockroachdb/cockroach:v22.1.1
         imagePullPolicy: IfNotPresent
         ports:
         - containerPort: 26257
diff --git a/cloud/kubernetes/v1.7/cockroachdb-statefulset.yaml b/cloud/kubernetes/v1.7/cockroachdb-statefulset.yaml
index be7a4b4ad021..2d687b3b2750 100644
--- a/cloud/kubernetes/v1.7/cockroachdb-statefulset.yaml
+++ b/cloud/kubernetes/v1.7/cockroachdb-statefulset.yaml
@@ -93,7 +93,7 @@ spec:
             topologyKey: kubernetes.io/hostname
       containers:
       - name: cockroachdb
-        image: cockroachdb/cockroach:v22.1.0
+        image: cockroachdb/cockroach:v22.1.1
         imagePullPolicy: IfNotPresent
         ports:
         - containerPort: 26257

From 24d490e3694d301dd4e42a3a763940b62edb2d1e Mon Sep 17 00:00:00 2001
From: Shiranka Miskin
Date: Fri, 29 Apr 2022 11:04:50 -0400
Subject: [PATCH 3/9] changefeedccl: update tests to use a random tenant and
 random sink

Previously, most of our tests did not run on tenants, as our helpers
made doing so unergonomic. We would also manually run tests across
multiple sinks even when the test did not care which sink it was run
on, drastically increasing the execution time of our test suite.

This PR updates our helper infrastructure to use a shared TestServer
struct that allows access to both the system and secondary tenant
interfaces, and by default runs tests on a random sink.

It also fixes a bug in the Webhook and Cloudstorage testfeeds where
numbers wider than 53 bits were rounded off due to JSON parsing.
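For context on that last fix: Go's encoding/json decodes every JSON
number into a float64 by default, and a float64 mantissa carries only
53 bits, so wider integers are silently rounded. Below is a minimal
standalone sketch of the failure mode and of one way around it
(json.Decoder's UseNumber); this is an illustration, not necessarily
the exact change made in the testfeeds:

    package main

    import (
    	"bytes"
    	"encoding/json"
    	"fmt"
    )

    func main() {
    	// 2^53 + 1 is the smallest positive integer that a float64
    	// cannot represent exactly.
    	const in = `{"id": 9007199254740993}`

    	// Default decoding turns the number into a float64 and rounds it.
    	var lossy map[string]interface{}
    	_ = json.Unmarshal([]byte(in), &lossy)
    	fmt.Println(lossy["id"]) // 9.007199254740992e+15

    	// UseNumber keeps the digits as a json.Number string, so the
    	// value survives the round trip intact.
    	dec := json.NewDecoder(bytes.NewReader([]byte(in)))
    	dec.UseNumber()
    	var exact map[string]interface{}
    	_ = dec.Decode(&exact)
    	fmt.Println(exact["id"]) // 9007199254740993
    }

Decoding into a struct whose field is an int64 also avoids the loss,
since the digits then never pass through a float64.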
Release note: None --- pkg/ccl/changefeedccl/BUILD.bazel | 6 - .../changefeedccl/alter_changefeed_test.go | 176 +-- pkg/ccl/changefeedccl/cdctest/BUILD.bazel | 1 - pkg/ccl/changefeedccl/cdctest/testfeed.go | 3 - pkg/ccl/changefeedccl/changefeed_test.go | 1145 +++++++---------- pkg/ccl/changefeedccl/encoder_test.go | 62 +- .../changefeedccl/helpers_tenant_shim_test.go | 121 -- pkg/ccl/changefeedccl/helpers_test.go | 446 ++++--- pkg/ccl/changefeedccl/nemeses_test.go | 12 +- .../show_changefeed_jobs_test.go | 19 +- pkg/ccl/changefeedccl/testfeed_test.go | 48 +- pkg/ccl/changefeedccl/validations_test.go | 13 +- pkg/server/testserver.go | 71 +- pkg/testutils/serverutils/test_tenant_shim.go | 4 + 14 files changed, 970 insertions(+), 1157 deletions(-) delete mode 100644 pkg/ccl/changefeedccl/helpers_tenant_shim_test.go diff --git a/pkg/ccl/changefeedccl/BUILD.bazel b/pkg/ccl/changefeedccl/BUILD.bazel index cd41fc5367e9..46f84a449e2c 100644 --- a/pkg/ccl/changefeedccl/BUILD.bazel +++ b/pkg/ccl/changefeedccl/BUILD.bazel @@ -138,7 +138,6 @@ go_test( "changefeed_test.go", "encoder_test.go", "event_processing_test.go", - "helpers_tenant_shim_test.go", "helpers_test.go", "main_test.go", "name_test.go", @@ -173,7 +172,6 @@ go_test( "//pkg/cloud", "//pkg/cloud/impl:cloudimpl", "//pkg/clusterversion", - "//pkg/config", "//pkg/gossip", "//pkg/jobs", "//pkg/jobs/jobspb", @@ -182,17 +180,14 @@ go_test( "//pkg/kv/kvclient/kvcoord", "//pkg/kv/kvserver", "//pkg/kv/kvserver/kvserverbase", - "//pkg/kv/kvserver/liveness/livenesspb", "//pkg/kv/kvserver/protectedts", "//pkg/kv/kvserver/protectedts/ptpb", "//pkg/roachpb", - "//pkg/rpc", "//pkg/security/securityassets", "//pkg/security/securitytest", "//pkg/security/username", "//pkg/server", "//pkg/server/serverpb", - "//pkg/server/status", "//pkg/server/telemetry", "//pkg/settings/cluster", "//pkg/spanconfig", @@ -241,7 +236,6 @@ go_test( "//pkg/util/randutil", "//pkg/util/retry", "//pkg/util/span", - "//pkg/util/stop", "//pkg/util/syncutil", "//pkg/util/timeutil", "//pkg/util/timeutil/pgdate", diff --git a/pkg/ccl/changefeedccl/alter_changefeed_test.go b/pkg/ccl/changefeedccl/alter_changefeed_test.go index 81d05b5d9d24..aa9e9ba1679f 100644 --- a/pkg/ccl/changefeedccl/alter_changefeed_test.go +++ b/pkg/ccl/changefeedccl/alter_changefeed_test.go @@ -10,7 +10,6 @@ package changefeedccl import ( "context" - gosql "database/sql" "fmt" "sync/atomic" "testing" @@ -23,7 +22,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" - "github.com/cockroachdb/cockroach/pkg/server" "github.com/cockroachdb/cockroach/pkg/server/telemetry" "github.com/cockroachdb/cockroach/pkg/sql/catalog/desctestutils" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" @@ -45,8 +43,8 @@ func TestAlterChangefeedAddTarget(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY)`) sqlDB.Exec(t, `CREATE TABLE bar (a INT PRIMARY KEY)`) @@ -75,15 +73,15 @@ func TestAlterChangefeedAddTarget(t *testing.T) { }) } - t.Run(`kafka`, kafkaTest(testFn)) + cdcTest(t, testFn, feedTestEnterpriseSinks) } func TestAlterChangefeedAddTargetFamily(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t 
*testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING, FAMILY onlya (a), FAMILY onlyb (b))`) testFeed := feed(t, f, `CREATE CHANGEFEED FOR foo FAMILY onlya`) @@ -113,15 +111,16 @@ func TestAlterChangefeedAddTargetFamily(t *testing.T) { }) } - t.Run(`kafka`, kafkaTest(testFn)) + // TODO: Figure out why this freezes on other sinks (ex: webhook) + cdcTest(t, testFn, feedTestForceSink("kafka")) } func TestAlterChangefeedSwitchFamily(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING, FAMILY onlya (a), FAMILY onlyb (b))`) testFeed := feed(t, f, `CREATE CHANGEFEED FOR foo FAMILY onlya`) @@ -150,15 +149,16 @@ func TestAlterChangefeedSwitchFamily(t *testing.T) { }) } - t.Run(`kafka`, kafkaTest(testFn)) + // TODO: Figure out why this freezes on other sinks (ex: cloudstorage) + cdcTest(t, testFn, feedTestForceSink("kafka")) } func TestAlterChangefeedDropTarget(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY)`) sqlDB.Exec(t, `CREATE TABLE bar (a INT PRIMARY KEY)`) @@ -185,15 +185,15 @@ func TestAlterChangefeedDropTarget(t *testing.T) { assertPayloads(t, testFeed, nil) } - t.Run(`kafka`, kafkaTest(testFn)) + cdcTest(t, testFn, feedTestEnterpriseSinks) } func TestAlterChangefeedDropTargetFamily(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING, FAMILY onlya (a), FAMILY onlyb (b))`) testFeed := feed(t, f, `CREATE CHANGEFEED FOR foo FAMILY onlya, foo FAMILY onlyb`) @@ -219,15 +219,15 @@ func TestAlterChangefeedDropTargetFamily(t *testing.T) { } - t.Run(`kafka`, kafkaTest(testFn)) + cdcTest(t, testFn, feedTestEnterpriseSinks) } func TestAlterChangefeedSetDiffOption(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING)`) testFeed := feed(t, f, `CREATE CHANGEFEED FOR foo`) @@ -250,15 +250,15 @@ func TestAlterChangefeedSetDiffOption(t *testing.T) { }) } - t.Run(`kafka`, kafkaTest(testFn)) + cdcTest(t, testFn, feedTestEnterpriseSinks) } func TestAlterChangefeedUnsetDiffOption(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s 
TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING)`) testFeed := feed(t, f, `CREATE CHANGEFEED FOR foo WITH diff`) @@ -281,15 +281,16 @@ func TestAlterChangefeedUnsetDiffOption(t *testing.T) { }) } - t.Run(`kafka`, kafkaTest(testFn)) + // TODO: Figure out why this fails on other sinks + cdcTest(t, testFn, feedTestForceSink("kafka")) } func TestAlterChangefeedErrors(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY)`) sqlDB.Exec(t, `CREATE TABLE bar (a INT PRIMARY KEY)`) testFeed := feed(t, f, `CREATE CHANGEFEED FOR foo`) @@ -372,15 +373,15 @@ func TestAlterChangefeedErrors(t *testing.T) { ) } - t.Run(`kafka`, kafkaTest(testFn)) + cdcTest(t, testFn, feedTestEnterpriseSinks) } func TestAlterChangefeedDropAllTargetsError(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY)`) sqlDB.Exec(t, `CREATE TABLE bar (a INT PRIMARY KEY)`) @@ -399,15 +400,15 @@ func TestAlterChangefeedDropAllTargetsError(t *testing.T) { ) } - t.Run(`kafka`, kafkaTest(testFn)) + cdcTest(t, testFn, feedTestEnterpriseSinks) } func TestAlterChangefeedTelemetry(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY)`) sqlDB.Exec(t, `INSERT INTO foo VALUES (1)`) sqlDB.Exec(t, `CREATE TABLE bar (a INT PRIMARY KEY)`) @@ -435,7 +436,7 @@ func TestAlterChangefeedTelemetry(t *testing.T) { require.Equal(t, int32(1), counts[`changefeed.alter.unset_options.1`]) } - t.Run(`kafka`, kafkaTest(testFn)) + cdcTest(t, testFn, feedTestEnterpriseSinks) } // The purpose of this test is to ensure that the ALTER CHANGEFEED statement @@ -497,8 +498,9 @@ func TestAlterChangefeedChangeSinkTypeError(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) + sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING)`) testFeed := feed(t, f, `CREATE CHANGEFEED FOR foo`) @@ -516,18 +518,18 @@ func TestAlterChangefeedChangeSinkTypeError(t *testing.T) { ) } - t.Run(`kafka`, kafkaTest(testFn)) + cdcTest(t, testFn, feedTestForceSink("kafka")) } func TestAlterChangefeedChangeSinkURI(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - registry := f.Server().JobRegistry().(*jobs.Registry) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + registry := s.Server.JobRegistry().(*jobs.Registry) ctx := context.Background() - sqlDB := 
sqlutils.MakeSQLRunner(db) + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING)`) testFeed := feed(t, f, `CREATE CHANGEFEED FOR foo`) @@ -554,19 +556,19 @@ func TestAlterChangefeedChangeSinkURI(t *testing.T) { require.Equal(t, newSinkURI, details.SinkURI) } - t.Run(`kafka`, kafkaTest(testFn)) + cdcTest(t, testFn, feedTestForceSink("kafka")) } func TestAlterChangefeedAddTargetErrors(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY)`) sqlDB.Exec(t, `INSERT INTO foo (a) SELECT * FROM generate_series(1, 1000)`) - knobs := f.Server().(*server.TestServer).Cfg.TestingKnobs. + knobs := s.TestingKnobs. DistSQL.(*execinfra.TestingKnobs). Changefeed.(*TestingKnobs) @@ -621,7 +623,7 @@ func TestAlterChangefeedAddTargetErrors(t *testing.T) { // Wait for the high water mark to be non-zero. testutils.SucceedsSoon(t, func() error { - registry := f.Server().JobRegistry().(*jobs.Registry) + registry := s.Server.JobRegistry().(*jobs.Registry) job, err := registry.LoadJob(context.Background(), feed.JobID()) require.NoError(t, err) prog := job.Progress() @@ -643,15 +645,15 @@ func TestAlterChangefeedAddTargetErrors(t *testing.T) { ) } - t.Run(`kafka`, kafkaTest(testFn, feedTestNoTenants)) + cdcTest(t, testFn, feedTestEnterpriseSinks) } func TestAlterChangefeedDatabaseQualifiedNames(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE DATABASE movr`) sqlDB.Exec(t, `CREATE TABLE movr.drivers (id INT PRIMARY KEY, name STRING)`) sqlDB.Exec(t, `CREATE TABLE movr.users (id INT PRIMARY KEY, name STRING)`) @@ -692,15 +694,15 @@ func TestAlterChangefeedDatabaseQualifiedNames(t *testing.T) { }) } - t.Run(`kafka`, kafkaTest(testFn)) + cdcTest(t, testFn, feedTestEnterpriseSinks) } func TestAlterChangefeedDatabaseScope(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE DATABASE movr`) sqlDB.Exec(t, `CREATE DATABASE new_movr`) @@ -737,15 +739,15 @@ func TestAlterChangefeedDatabaseScope(t *testing.T) { }) } - t.Run(`kafka`, kafkaTest(testFn)) + cdcTest(t, testFn, feedTestEnterpriseSinks) } func TestAlterChangefeedDatabaseScopeUnqualifiedName(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE DATABASE movr`) sqlDB.Exec(t, `CREATE DATABASE new_movr`) @@ -786,15 +788,15 @@ func TestAlterChangefeedDatabaseScopeUnqualifiedName(t *testing.T) { }) } - t.Run(`kafka`, kafkaTest(testFn)) + cdcTest(t, testFn, feedTestEnterpriseSinks) } func TestAlterChangefeedColumnFamilyDatabaseScope(t 
*testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE DATABASE movr`) sqlDB.Exec(t, `CREATE TABLE movr.drivers (id INT PRIMARY KEY, name STRING, FAMILY onlyid (id), FAMILY onlyname (name))`) @@ -831,15 +833,15 @@ func TestAlterChangefeedColumnFamilyDatabaseScope(t *testing.T) { }) } - t.Run(`kafka`, kafkaTest(testFn)) + cdcTest(t, testFn, feedTestEnterpriseSinks) } func TestAlterChangefeedAlterTableName(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE DATABASE movr`) sqlDB.Exec(t, `CREATE TABLE movr.users (id INT PRIMARY KEY, name STRING)`) sqlDB.Exec(t, @@ -888,15 +890,15 @@ func TestAlterChangefeedAlterTableName(t *testing.T) { }) } - t.Run(`kafka`, kafkaTest(testFn)) + cdcTest(t, testFn, feedTestEnterpriseSinks) } func TestAlterChangefeedInitialScan(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY)`) sqlDB.Exec(t, `INSERT INTO foo VALUES (1), (2), (3)`) sqlDB.Exec(t, `CREATE TABLE bar (a INT PRIMARY KEY)`) @@ -930,15 +932,15 @@ func TestAlterChangefeedInitialScan(t *testing.T) { }) } - t.Run(`kafka`, kafkaTest(testFn)) + cdcTest(t, testFn, feedTestEnterpriseSinks) } func TestAlterChangefeedNoInitialScan(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY)`) sqlDB.Exec(t, `INSERT INTO foo VALUES (1), (2), (3)`) sqlDB.Exec(t, `CREATE TABLE bar (a INT PRIMARY KEY)`) @@ -973,7 +975,7 @@ func TestAlterChangefeedNoInitialScan(t *testing.T) { }) } - t.Run(`kafka`, kafkaTest(testFn)) + cdcTest(t, testFn, feedTestEnterpriseSinks) } func TestAlterChangefeedAddTargetsDuringSchemaChangeError(t *testing.T) { @@ -982,11 +984,11 @@ func TestAlterChangefeedAddTargetsDuringSchemaChangeError(t *testing.T) { rnd, _ := randutil.NewPseudoRand() - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServerWithSystem, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) disableDeclarativeSchemaChangesForTest(t, sqlDB) - knobs := f.Server().(*server.TestServer).Cfg.TestingKnobs. + knobs := s.TestingKnobs. DistSQL.(*execinfra.TestingKnobs). 
Changefeed.(*TestingKnobs) @@ -1005,7 +1007,7 @@ func TestAlterChangefeedAddTargetsDuringSchemaChangeError(t *testing.T) { testFeed := feed(t, f, `CREATE CHANGEFEED FOR foo WITH resolved = '1s', no_initial_scan`) jobFeed := testFeed.(cdctest.EnterpriseTestFeed) - jobRegistry := f.Server().JobRegistry().(*jobs.Registry) + jobRegistry := s.Server.JobRegistry().(*jobs.Registry) // Kafka feeds are not buffered, so we have to consume messages. g := ctxgroup.WithContext(context.Background()) @@ -1045,13 +1047,13 @@ func TestAlterChangefeedAddTargetsDuringSchemaChangeError(t *testing.T) { var maxCheckpointSize int64 = 100 << 20 // Checkpoint progress frequently, and set the checkpoint size limit. changefeedbase.FrontierCheckpointFrequency.Override( - context.Background(), &f.Server().ClusterSettings().SV, 10*time.Millisecond) + context.Background(), &s.Server.ClusterSettings().SV, 10*time.Millisecond) changefeedbase.FrontierCheckpointMaxBytes.Override( - context.Background(), &f.Server().ClusterSettings().SV, maxCheckpointSize) + context.Background(), &s.Server.ClusterSettings().SV, maxCheckpointSize) // Note the tableSpan to avoid resolved events that leave no gaps fooDesc := desctestutils.TestingGetPublicTableDescriptor( - f.Server().DB(), keys.SystemSQLCodec, "d", "foo") + s.SystemServer.DB(), s.Codec, "d", "foo") tableSpan := fooDesc.PrimaryIndexSpan(keys.SystemSQLCodec) // ShouldSkipResolved should ensure that once the backfill begins, the following resolved events @@ -1118,7 +1120,7 @@ func TestAlterChangefeedAddTargetsDuringSchemaChangeError(t *testing.T) { sqlDB.ExpectErr(t, errMsg, fmt.Sprintf(`ALTER CHANGEFEED %d ADD bar WITH initial_scan`, jobFeed.JobID())) } - t.Run(`kafka`, kafkaTest(testFn, feedTestNoTenants)) + cdcTestWithSystem(t, testFn, feedTestEnterpriseSinks) } func TestAlterChangefeedAddTargetsDuringBackfill(t *testing.T) { @@ -1130,8 +1132,8 @@ func TestAlterChangefeedAddTargetsDuringBackfill(t *testing.T) { const maxCheckpointSize = 1 << 20 const numRowsPerTable = 1000 - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServerWithSystem, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo(val INT PRIMARY KEY)`) sqlDB.Exec(t, `INSERT INTO foo (val) SELECT * FROM generate_series(0, $1)`, numRowsPerTable-1) @@ -1139,10 +1141,10 @@ func TestAlterChangefeedAddTargetsDuringBackfill(t *testing.T) { sqlDB.Exec(t, `INSERT INTO bar (val) SELECT * FROM generate_series(0, $1)`, numRowsPerTable-1) fooDesc := desctestutils.TestingGetPublicTableDescriptor( - f.Server().DB(), keys.SystemSQLCodec, "d", "foo") - fooTableSpan := fooDesc.PrimaryIndexSpan(keys.SystemSQLCodec) + s.SystemServer.DB(), s.Codec, "d", "foo") + fooTableSpan := fooDesc.PrimaryIndexSpan(s.Codec) - knobs := f.Server().(*server.TestServer).Cfg.TestingKnobs. + knobs := s.TestingKnobs. DistSQL.(*execinfra.TestingKnobs). Changefeed.(*TestingKnobs) @@ -1180,11 +1182,11 @@ func TestAlterChangefeedAddTargetsDuringBackfill(t *testing.T) { // Checkpoint progress frequently, and set the checkpoint size limit. 
changefeedbase.FrontierCheckpointFrequency.Override( - context.Background(), &f.Server().ClusterSettings().SV, 1) + context.Background(), &s.Server.ClusterSettings().SV, 1) changefeedbase.FrontierCheckpointMaxBytes.Override( - context.Background(), &f.Server().ClusterSettings().SV, maxCheckpointSize) + context.Background(), &s.Server.ClusterSettings().SV, maxCheckpointSize) - registry := f.Server().JobRegistry().(*jobs.Registry) + registry := s.Server.JobRegistry().(*jobs.Registry) testFeed := feed(t, f, `CREATE CHANGEFEED FOR foo WITH resolved = '100ms'`) g := ctxgroup.WithContext(context.Background()) @@ -1271,15 +1273,15 @@ func TestAlterChangefeedAddTargetsDuringBackfill(t *testing.T) { } } - t.Run(`kafka`, kafkaTest(testFn, feedTestNoTenants)) + cdcTestWithSystem(t, testFn, feedTestEnterpriseSinks) } func TestAlterChangefeedUpdateFilter(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING)`) testFeed := feed(t, f, `CREATE CHANGEFEED FOR foo`) @@ -1297,7 +1299,7 @@ func TestAlterChangefeedUpdateFilter(t *testing.T) { feed, ok := testFeed.(cdctest.EnterpriseTestFeed) require.True(t, ok) - require.NoError(t, feed.TickHighWaterMark(f.Server().Clock().Now())) + require.NoError(t, feed.TickHighWaterMark(s.Server.Clock().Now())) require.NoError(t, feed.Pause()) // Try to set an invalid filter (column b is not part of primary key). @@ -1320,7 +1322,7 @@ func TestAlterChangefeedUpdateFilter(t *testing.T) { }) // Pause again, clear out filter and verify we get expected values. - require.NoError(t, feed.TickHighWaterMark(f.Server().Clock().Now())) + require.NoError(t, feed.TickHighWaterMark(s.Server.Clock().Now())) require.NoError(t, feed.Pause()) // Set filter to emit a > 4. We expect to see update row 5, and onward. @@ -1342,5 +1344,5 @@ func TestAlterChangefeedUpdateFilter(t *testing.T) { }) } - t.Run(`kafka`, kafkaTest(testFn)) + cdcTest(t, testFn, feedTestEnterpriseSinks) } diff --git a/pkg/ccl/changefeedccl/cdctest/BUILD.bazel b/pkg/ccl/changefeedccl/cdctest/BUILD.bazel index 0a866a6aaee9..72f8ee168100 100644 --- a/pkg/ccl/changefeedccl/cdctest/BUILD.bazel +++ b/pkg/ccl/changefeedccl/cdctest/BUILD.bazel @@ -25,7 +25,6 @@ go_library( "//pkg/sql/catalog", "//pkg/sql/catalog/descs", "//pkg/sql/sem/tree", - "//pkg/testutils/serverutils", "//pkg/util", "//pkg/util/fsm", "//pkg/util/hlc", diff --git a/pkg/ccl/changefeedccl/cdctest/testfeed.go b/pkg/ccl/changefeedccl/cdctest/testfeed.go index 3a1835be3d9c..a0dc830a740e 100644 --- a/pkg/ccl/changefeedccl/cdctest/testfeed.go +++ b/pkg/ccl/changefeedccl/cdctest/testfeed.go @@ -13,7 +13,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" - "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/util/hlc" ) @@ -21,8 +20,6 @@ import ( type TestFeedFactory interface { // Feed creates a new TestFeed. Feed(create string, args ...interface{}) (TestFeed, error) - // Server returns the raw underlying TestServer, if applicable. 
- Server() serverutils.TestServerInterface } // TestFeedMessage represents one row update or resolved timestamp message from diff --git a/pkg/ccl/changefeedccl/changefeed_test.go b/pkg/ccl/changefeedccl/changefeed_test.go index 74cd1d273f9d..5c0b71194e95 100644 --- a/pkg/ccl/changefeedccl/changefeed_test.go +++ b/pkg/ccl/changefeedccl/changefeed_test.go @@ -51,7 +51,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/server" - "github.com/cockroachdb/cockroach/pkg/server/serverpb" "github.com/cockroachdb/cockroach/pkg/server/telemetry" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/spanconfig" @@ -96,8 +95,8 @@ func TestChangefeedBasics(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING)`) sqlDB.Exec(t, `INSERT INTO foo VALUES (0, 'initial')`) sqlDB.Exec(t, `UPSERT INTO foo VALUES (0, 'updated')`) @@ -128,33 +127,31 @@ func TestChangefeedBasics(t *testing.T) { }) } - t.Run(`sinkless`, sinklessTest(testFn)) - t.Run(`enterprise`, enterpriseTest(testFn)) - t.Run(`cloudstorage`, cloudStorageTest(testFn)) - t.Run(`kafka`, kafkaTest(testFn)) - t.Run(`webhook`, webhookTest(testFn)) - t.Run(`pubsub`, pubsubTest(testFn)) + cdcTest(t, testFn, feedTestForceSink("kafka")) + cdcTest(t, testFn, feedTestForceSink("enterprise")) + cdcTest(t, testFn, feedTestForceSink("webhook")) + cdcTest(t, testFn, feedTestForceSink("pubsub")) + cdcTest(t, testFn, feedTestForceSink("sinkless")) + cdcTest(t, testFn, feedTestForceSink("cloudstorage")) // NB running TestChangefeedBasics, which includes a DELETE, with // cloudStorageTest is a regression test for #36994. 
} -// TestChangefeedSendError validates that SendErrors do not fail the changefeed -// as they can occur in normal situations such as a cluster update func TestChangefeedIdleness(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + cdcTest(t, func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) changefeedbase.IdleTimeout.Override( - context.Background(), &f.Server().ClusterSettings().SV, 3*time.Second) + context.Background(), &s.Server.ClusterSettings().SV, 3*time.Second) // Idleness functionality is version gated - knobs := f.Server().TestingKnobs().Server.(*server.TestingKnobs) + knobs := s.TestingKnobs.Server.(*server.TestingKnobs) knobs.BinaryVersionOverride = clusterversion.ByKey(clusterversion.ChangefeedIdleness) - registry := f.Server().JobRegistry().(*jobs.Registry) + registry := s.Server.JobRegistry().(*jobs.Registry) currentlyIdle := registry.MetricsStruct().JobMetrics[jobspb.TypeChangefeed].CurrentlyIdle waitForIdleCount := func(numIdle int64) { testutils.SucceedsSoon(t, func() error { @@ -194,24 +191,21 @@ func TestChangefeedIdleness(t *testing.T) { `foo: [0]->{"after": {"a": 0}}`, `foo: [1]->{"after": {"a": 1}}`, }) - } - - // Tenant testing disabled due to TestServerInterface being required - t.Run(`enterprise`, enterpriseTest(testFn, feedTestNoTenants)) - t.Run(`kafka`, kafkaTest(testFn, feedTestNoTenants)) - t.Run(`webhook`, webhookTest(testFn, feedTestNoTenants)) + }, feedTestEnterpriseSinks) } +// TestChangefeedSendError validates that SendErrors do not fail the changefeed +// as they can occur in normal situations such as a cluster update func TestChangefeedSendError(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + cdcTest(t, func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY)`) sqlDB.Exec(t, `INSERT INTO foo VALUES (0)`) - knobs := f.Server().TestingKnobs(). + knobs := s.TestingKnobs. DistSQL.(*execinfra.TestingKnobs). 
Changefeed.(*TestingKnobs) @@ -236,7 +230,7 @@ func TestChangefeedSendError(t *testing.T) { sqlDB.Exec(t, `INSERT INTO foo VALUES (4)`) // Changefeed should've been retried due to the SendError - registry := f.Server().JobRegistry().(*jobs.Registry) + registry := s.Server.JobRegistry().(*jobs.Registry) sli, err := registry.MetricsStruct().Changefeed.(*Metrics).getSLIMetrics(defaultSLIScope) require.NoError(t, err) retryCounter := sli.ErrorRetries @@ -254,21 +248,15 @@ func TestChangefeedSendError(t *testing.T) { `foo: [3]->{"after": {"a": 3}}`, `foo: [4]->{"after": {"a": 4}}`, }) - } - - t.Run(`enterprise`, enterpriseTest(testFn)) - t.Run(`cloudstorage`, cloudStorageTest(testFn)) - t.Run(`kafka`, kafkaTest(testFn)) - t.Run(`webhook`, webhookTest(testFn)) - t.Run(`pubsub`, pubsubTest(testFn)) + }, feedTestEnterpriseSinks) } func TestChangefeedBasicConfluentKafka(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING)`) sqlDB.Exec(t, `INSERT INTO foo VALUES (0, 'initial')`) sqlDB.Exec(t, `UPSERT INTO foo VALUES (0, 'updated')`) @@ -301,15 +289,15 @@ func TestChangefeedBasicConfluentKafka(t *testing.T) { }) } - t.Run(`kafka`, kafkaTest(testFn)) + cdcTest(t, testFn, feedTestForceSink("kafka")) } func TestChangefeedDiff(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING)`) sqlDB.Exec(t, `INSERT INTO foo VALUES (0, 'initial')`) sqlDB.Exec(t, `UPSERT INTO foo VALUES (0, 'updated')`) @@ -346,19 +334,14 @@ func TestChangefeedDiff(t *testing.T) { }) } - t.Run(`sinkless`, sinklessTest(testFn)) - t.Run(`enterprise`, enterpriseTest(testFn)) - t.Run(`cloudstorage`, cloudStorageTest(testFn)) - t.Run(`kafka`, kafkaTest(testFn)) - t.Run(`webhook`, webhookTest(testFn)) - t.Run(`pubsub`, pubsubTest(testFn)) + cdcTest(t, testFn) } func TestChangefeedTenants(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - kvServer, kvSQLdb, cleanup := startTestServer(t, feedTestOptions{argsFn: func(args *base.TestServerArgs) { + kvServer, kvSQLdb, cleanup := startTestFullServer(t, feedTestOptions{argsFn: func(args *base.TestServerArgs) { args.ExternalIODirConfig.DisableOutbound = true }}) defer cleanup() @@ -413,13 +396,11 @@ func TestMissingTableErr(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - _, kvSQLdb, cleanup := startTestServer(t, feedTestOptions{argsFn: func(args *base.TestServerArgs) { - args.ExternalIODirConfig.DisableOutbound = true - }}) + s, cleanup := makeServer(t) defer cleanup() t.Run("changefeed on non existing table fails", func(t *testing.T) { - kvSQL := sqlutils.MakeSQLRunner(kvSQLdb) + kvSQL := sqlutils.MakeSQLRunner(s.DB) kvSQL.ExpectErr(t, `^pq: failed to resolve targets in the CHANGEFEED stmt: table "foo" does not exist$`, `CREATE CHANGEFEED FOR foo`, ) @@ -430,31 +411,17 @@ func TestChangefeedTenantsExternalIOEnabled(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - kvServer, _, cleanup := startTestServer(t, 
feedTestOptions{argsFn: func(args *base.TestServerArgs) { + s, cleanup := makeTenantServer(t, withArgsFn(func(args *base.TestServerArgs) { args.ExternalIODirConfig.DisableOutbound = true - }}) + })) defer cleanup() - tenantArgs := base.TestTenantArgs{ - // crdb_internal.create_tenant called by StartTenant - TenantID: serverutils.TestTenantID(), - UseDatabase: `d`, - TestingKnobs: base.TestingKnobs{ - DistSQL: &execinfra.TestingKnobs{Changefeed: &TestingKnobs{}}, - JobsTestingKnobs: jobs.NewTestingKnobsWithShortIntervals(), - }, - } - - tenantServer, tenantDB := serverutils.StartTenant(t, kvServer, tenantArgs) - tenantSQL := sqlutils.MakeSQLRunner(tenantDB) - tenantSQL.ExecMultiple(t, strings.Split(serverSetupStatements, ";")...) + tenantSQL := sqlutils.MakeSQLRunner(s.DB) tenantSQL.Exec(t, `CREATE TABLE foo_in_tenant (pk INT PRIMARY KEY)`) t.Run("sinkful changefeed works", func(t *testing.T) { - f := makeKafkaFeedFactory(&testServerShim{ - TestTenantInterface: tenantServer, - kvServer: kvServer}, - tenantDB) + f, cleanup := makeFeedFactory(t, "kafka", s.Server, s.DB) + defer cleanup() tenantSQL.Exec(t, `INSERT INTO foo_in_tenant VALUES (1)`) feed := feed(t, f, `CREATE CHANGEFEED FOR foo_in_tenant`) defer closeFeed(t, feed) @@ -468,8 +435,8 @@ func TestChangefeedEnvelope(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING)`) sqlDB.Exec(t, `INSERT INTO foo VALUES (1, 'a')`) @@ -500,17 +467,16 @@ func TestChangefeedEnvelope(t *testing.T) { }) } - t.Run(`sinkless`, sinklessTest(testFn)) - t.Run(`enterprise`, enterpriseTest(testFn)) - t.Run(`kafka`, kafkaTest(testFn)) + // some sinks are incompatible with envelope + cdcTest(t, testFn, feedTestRestrictSinks("sinkless", "enterprise", "kafka")) } func TestChangefeedFullTableName(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + cdcTest(t, func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING)`) sqlDB.Exec(t, `INSERT INTO foo VALUES (1, 'a')`) @@ -519,22 +485,15 @@ func TestChangefeedFullTableName(t *testing.T) { defer closeFeed(t, foo) assertPayloads(t, foo, []string{`d.public.foo: [1]->{"after": {"a": 1, "b": "a"}}`}) }) - } - - t.Run(`cloudstorage`, cloudStorageTest(testFn)) - t.Run(`sinkless`, sinklessTest(testFn)) - t.Run(`enterprise`, enterpriseTest(testFn)) - t.Run(`kafka`, kafkaTest(testFn)) - t.Run(`webhook`, webhookTest(testFn)) - t.Run(`pubsub`, pubsubTest(testFn)) + }) } func TestChangefeedMultiTable(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING)`) sqlDB.Exec(t, `INSERT INTO foo VALUES (1, 'a')`) sqlDB.Exec(t, `CREATE TABLE bar (a INT PRIMARY KEY, b STRING)`) @@ -549,19 +508,15 @@ func TestChangefeedMultiTable(t *testing.T) { }) } - t.Run(`sinkless`, sinklessTest(testFn)) - 
t.Run(`enterprise`, enterpriseTest(testFn)) - t.Run(`kafka`, kafkaTest(testFn)) - t.Run(`webhook`, webhookTest(testFn)) - t.Run(`pubsub`, pubsubTest(testFn)) + cdcTest(t, testFn) } func TestChangefeedCursor(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING)`) // To make sure that these timestamps are after 'before' and before @@ -611,20 +566,16 @@ func TestChangefeedCursor(t *testing.T) { } } - t.Run(`sinkless`, sinklessTest(testFn)) - t.Run(`enterprise`, enterpriseTest(testFn)) - t.Run(`kafka`, kafkaTest(testFn)) - t.Run(`webhook`, webhookTest(testFn)) - t.Run(`pubsub`, pubsubTest(testFn)) + cdcTest(t, testFn) } func TestChangefeedTimestamps(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { ctx := context.Background() - sqlDB := sqlutils.MakeSQLRunner(db) + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY)`) sqlDB.Exec(t, `INSERT INTO foo VALUES (0)`) @@ -655,7 +606,7 @@ func TestChangefeedTimestamps(t *testing.T) { // Assert the remaining key using assertPayloads, since we know the exact // timestamp expected. var ts1 string - if err := crdb.ExecuteTx(ctx, db, nil /* txopts */, func(tx *gosql.Tx) error { + if err := crdb.ExecuteTx(ctx, s.DB, nil /* txopts */, func(tx *gosql.Tx) error { return tx.QueryRow( `INSERT INTO foo VALUES (1) RETURNING cluster_logical_timestamp()`, ).Scan(&ts1) @@ -675,19 +626,15 @@ func TestChangefeedTimestamps(t *testing.T) { } } - t.Run(`sinkless`, sinklessTest(testFn)) - t.Run(`enterprise`, enterpriseTest(testFn)) - t.Run(`kafka`, kafkaTest(testFn)) - t.Run(`webhook`, webhookTest(testFn)) - t.Run(`pubsub`, pubsubTest(testFn)) + cdcTest(t, testFn) } func TestChangefeedMVCCTimestamps(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE mvcc_timestamp_test_table (id UUID PRIMARY KEY DEFAULT gen_random_uuid())`) rowCount := 5 @@ -707,19 +654,15 @@ func TestChangefeedMVCCTimestamps(t *testing.T) { assertPayloads(t, changeFeed, expectedPayloads) } - t.Run(`sinkless`, sinklessTest(testFn)) - t.Run(`enterprise`, enterpriseTest(testFn)) - t.Run(`kafka`, kafkaTest(testFn)) - t.Run(`webhook`, webhookTest(testFn)) - t.Run(`pubsub`, pubsubTest(testFn)) + cdcTest(t, testFn) } func TestChangefeedResolvedFrequency(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY)`) const freq = 10 * time.Millisecond @@ -741,11 +684,7 @@ func TestChangefeedResolvedFrequency(t *testing.T) { } } - t.Run(`sinkless`, sinklessTest(testFn)) - t.Run(`enterprise`, enterpriseTest(testFn)) - t.Run(`kafka`, kafkaTest(testFn)) 
- t.Run(`webhook`, webhookTest(testFn)) - t.Run(`pubsub`, pubsubTest(testFn)) + cdcTest(t, testFn) } // Test how Changefeeds react to schema changes that do not require a backfill @@ -764,8 +703,8 @@ func TestChangefeedInitialScan(t *testing.T) { `cursor - with initial backfill`: `CREATE CHANGEFEED FOR initial_scan WITH initial_scan = 'yes', resolved='1s', cursor='%s'`, } - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) for testName, changefeedStmt := range noInitialScanTests { t.Run(testName, func(t *testing.T) { @@ -812,25 +751,18 @@ func TestChangefeedInitialScan(t *testing.T) { } } - t.Run(`sinkless`, sinklessTest(testFn)) - t.Run(`enterprise`, enterpriseTest(testFn)) - t.Run(`kafka`, kafkaTest(testFn)) - t.Run(`webhook`, webhookTest(testFn)) - t.Run(`pubsub`, pubsubTest(testFn)) + cdcTest(t, testFn) } func TestChangefeedBackfillObservability(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) - - knobs := f.Server().(*server.TestServer).Cfg.TestingKnobs. - DistSQL.(*execinfra.TestingKnobs). - Changefeed.(*TestingKnobs) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) - registry := f.Server().JobRegistry().(*jobs.Registry) + knobs := s.TestingKnobs.DistSQL.(*execinfra.TestingKnobs).Changefeed.(*TestingKnobs) + registry := s.Server.JobRegistry().(*jobs.Registry) sli, err := registry.MetricsStruct().Changefeed.(*Metrics).getSLIMetrics(defaultSLIScope) require.NoError(t, err) pendingRanges := sli.BackfillPendingRanges @@ -887,13 +819,14 @@ func TestChangefeedBackfillObservability(t *testing.T) { }) } - t.Run("enterprise", enterpriseTest(testFn, feedTestNoTenants)) + // Can't run on tenants due to lack of SPLIT AT support (#54254) + cdcTest(t, testFn, feedTestNoTenants, feedTestEnterpriseSinks) } func TestChangefeedUserDefinedTypes(t *testing.T) { defer leaktest.AfterTest(t)() - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) disableDeclarativeSchemaChangesForTest(t, sqlDB) // Set up a type and table. 
sqlDB.Exec(t, `CREATE TYPE t AS ENUM ('hello', 'howdy', 'hi')`) @@ -946,11 +879,7 @@ func TestChangefeedUserDefinedTypes(t *testing.T) { } - t.Run(`sinkless`, sinklessTest(testFn)) - t.Run(`enterprise`, enterpriseTest(testFn)) - t.Run(`kafka`, kafkaTest(testFn)) - t.Run(`webhook`, webhookTest(testFn)) - t.Run(`pubsub`, pubsubTest(testFn)) + cdcTest(t, testFn) } func TestChangefeedExternalIODisabled(t *testing.T) { @@ -988,17 +917,16 @@ func TestChangefeedExternalIODisabled(t *testing.T) { }) withDisabledOutbound := func(args *base.TestServerArgs) { args.ExternalIODirConfig.DisableOutbound = true } - t.Run("sinkless changfeeds are allowed with disabled external io", - sinklessTest(func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) - sqlDB.Exec(t, "CREATE TABLE target_table (pk INT PRIMARY KEY)") - sqlDB.Exec(t, "INSERT INTO target_table VALUES (1)") - feed := feed(t, f, "CREATE CHANGEFEED FOR target_table") - defer closeFeed(t, feed) - assertPayloads(t, feed, []string{ - `target_table: [1]->{"after": {"pk": 1}}`, - }) - }, withArgsFn(withDisabledOutbound))) + cdcTestNamed(t, "sinkless changfeeds are allowed with disabled external io", func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) + sqlDB.Exec(t, "CREATE TABLE target_table (pk INT PRIMARY KEY)") + sqlDB.Exec(t, "INSERT INTO target_table VALUES (1)") + feed := feed(t, f, "CREATE CHANGEFEED FOR target_table") + defer closeFeed(t, feed) + assertPayloads(t, feed, []string{ + `target_table: [1]->{"after": {"pk": 1}}`, + }) + }, feedTestForceSink("sinkless"), withArgsFn(withDisabledOutbound)) } // Test how Changefeeds react to schema changes that do not require a backfill @@ -1008,8 +936,8 @@ func TestChangefeedSchemaChangeNoBackfill(t *testing.T) { defer log.Scope(t).Close(t) skip.UnderRace(t, "takes >1 min under race") - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) disableDeclarativeSchemaChangesForTest(t, sqlDB) // Schema changes that predate the changefeed. @@ -1181,11 +1109,8 @@ func TestChangefeedSchemaChangeNoBackfill(t *testing.T) { }) } - t.Run(`sinkless`, sinklessTest(testFn)) - t.Run(`enterprise`, enterpriseTest(testFn)) - t.Run(`kafka`, kafkaTest(testFn)) - t.Run(`webhook`, webhookTest(testFn)) - t.Run(`pubsub`, pubsubTest(testFn)) + cdcTest(t, testFn) + log.Flush() entries, err := log.FetchEntriesFromFiles(0, math.MaxInt64, 1, regexp.MustCompile("cdc ux violation"), log.WithFlattenedSensitiveData) @@ -1204,7 +1129,7 @@ func TestChangefeedLaggingSpanCheckpointing(t *testing.T) { defer log.Scope(t).Close(t) rnd, _ := randutil.NewPseudoRand() - s, db, stopServer := startTestServer(t, feedTestOptions{}) + s, db, stopServer := startTestFullServer(t, feedTestOptions{}) defer stopServer() sqlDB := sqlutils.MakeSQLRunner(db) @@ -1338,11 +1263,11 @@ func TestChangefeedSchemaChangeBackfillCheckpoint(t *testing.T) { // truncation var maxCheckpointSize int64 = 100 << 20 - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServerWithSystem, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) disableDeclarativeSchemaChangesForTest(t, sqlDB) - knobs := f.Server().(*server.TestServer).Cfg.TestingKnobs. + knobs := s.TestingKnobs. 
DistSQL.(*execinfra.TestingKnobs). Changefeed.(*TestingKnobs) @@ -1360,7 +1285,7 @@ func TestChangefeedSchemaChangeBackfillCheckpoint(t *testing.T) { // Setup changefeed job details, avoid relying on initial scan functionality baseFeed := feed(t, f, `CREATE CHANGEFEED FOR foo WITH resolved='100ms', no_initial_scan`) jobFeed := baseFeed.(cdctest.EnterpriseTestFeed) - jobRegistry := f.Server().JobRegistry().(*jobs.Registry) + jobRegistry := s.Server.JobRegistry().(*jobs.Registry) // Ensure events are consumed for sinks that don't buffer (ex: Kafka) g := ctxgroup.WithContext(context.Background()) @@ -1399,14 +1324,14 @@ func TestChangefeedSchemaChangeBackfillCheckpoint(t *testing.T) { // Checkpoint progress frequently, and set the checkpoint size limit. changefeedbase.FrontierCheckpointFrequency.Override( - context.Background(), &f.Server().ClusterSettings().SV, 10*time.Millisecond) + context.Background(), &s.Server.ClusterSettings().SV, 10*time.Millisecond) changefeedbase.FrontierCheckpointMaxBytes.Override( - context.Background(), &f.Server().ClusterSettings().SV, maxCheckpointSize) + context.Background(), &s.Server.ClusterSettings().SV, maxCheckpointSize) // Note the tableSpan to avoid resolved events that leave no gaps fooDesc := desctestutils.TestingGetPublicTableDescriptor( - f.Server().DB(), keys.SystemSQLCodec, "d", "foo") - tableSpan := fooDesc.PrimaryIndexSpan(keys.SystemSQLCodec) + s.SystemServer.DB(), s.Codec, "d", "foo") + tableSpan := fooDesc.PrimaryIndexSpan(s.Codec) // ShouldSkipResolved should ensure that once the backfill begins, the following resolved events // that are for that backfill (are of the timestamp right after the backfill timestamp) resolve some @@ -1554,10 +1479,7 @@ func TestChangefeedSchemaChangeBackfillCheckpoint(t *testing.T) { } } - // TODO(ssd): Tenant testing disabled because of use of DB() - t.Run("enterprise", enterpriseTest(testFn, feedTestNoTenants)) - t.Run("cloudstorage", cloudStorageTest(testFn, feedTestNoTenants)) - t.Run("kafka", kafkaTest(testFn, feedTestNoTenants)) + cdcTestWithSystem(t, testFn, feedTestEnterpriseSinks) log.Flush() entries, err := log.FetchEntriesFromFiles(0, math.MaxInt64, 1, @@ -1576,8 +1498,8 @@ func TestChangefeedSchemaChangeAllowBackfill(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServerWithSystem, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) disableDeclarativeSchemaChangesForTest(t, sqlDB) // Expected semantics: @@ -1609,7 +1531,7 @@ func TestChangefeedSchemaChangeAllowBackfill(t *testing.T) { `add_column_def: [2]->{"after": {"a": 2}}`, }) sqlDB.Exec(t, `ALTER TABLE add_column_def ADD COLUMN b STRING DEFAULT 'd'`) - ts := fetchDescVersionModificationTime(t, db, f, `add_column_def`, 4) + ts := fetchDescVersionModificationTime(t, s, `add_column_def`, 4) // Schema change backfill assertPayloadsStripTs(t, addColumnDef, []string{ `add_column_def: [1]->{"after": {"a": 1}}`, @@ -1639,7 +1561,7 @@ func TestChangefeedSchemaChangeAllowBackfill(t *testing.T) { `add_col_comp: [1]->{"after": {"a": 1, "b": 6}}`, `add_col_comp: [2]->{"after": {"a": 2, "b": 7}}`, }) - ts := fetchDescVersionModificationTime(t, db, f, `add_col_comp`, 4) + ts := fetchDescVersionModificationTime(t, s, `add_col_comp`, 4) assertPayloads(t, addColComp, []string{ fmt.Sprintf(`add_col_comp: [1]->{"after": {"a": 1, "b": 6, "c": 11}, "updated": "%s"}`, 
ts.AsOfSystemTime()), @@ -1689,7 +1611,7 @@ func TestChangefeedSchemaChangeAllowBackfill(t *testing.T) { wg.Wait() return nil } - knobs := f.Server().TestingKnobs(). + knobs := s.TestingKnobs. DistSQL.(*execinfra.TestingKnobs). Changefeed.(*TestingKnobs) knobs.BeforeEmitRow = waitSinkHook @@ -1731,7 +1653,7 @@ func TestChangefeedSchemaChangeAllowBackfill(t *testing.T) { `multiple_alters: [1]->{"after": {"a": 1, "c": "cee"}}`, `multiple_alters: [2]->{"after": {"a": 2, "c": "cee"}}`, }) - ts := fetchDescVersionModificationTime(t, db, f, `multiple_alters`, 10) + ts := fetchDescVersionModificationTime(t, s, `multiple_alters`, 10) // Changefeed level backfill for ADD COLUMN d. assertPayloads(t, multipleAlters, []string{ // Backfill no-ops for column D (C schema change is complete) @@ -1743,13 +1665,8 @@ func TestChangefeedSchemaChangeAllowBackfill(t *testing.T) { }) } - // TODO(ssd): tenant tests skipped because of f.Server() use - // in fetchDescVersionModificationTime - t.Run(`sinkless`, sinklessTest(testFn, feedTestNoTenants)) - t.Run(`enterprise`, enterpriseTest(testFn, feedTestNoTenants)) - t.Run(`kafka`, kafkaTest(testFn, feedTestNoTenants)) - t.Run(`webhook`, webhookTest(testFn, feedTestNoTenants)) - t.Run(`pubsub`, pubsubTest(testFn, feedTestNoTenants)) + cdcTestWithSystem(t, testFn) + log.Flush() entries, err := log.FetchEntriesFromFiles(0, math.MaxInt64, 1, regexp.MustCompile("cdc ux violation"), log.WithFlattenedSensitiveData) @@ -1766,8 +1683,8 @@ func TestChangefeedSchemaChangeBackfillScope(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServerWithSystem, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) disableDeclarativeSchemaChangesForTest(t, sqlDB) t.Run(`add column with default`, func(t *testing.T) { @@ -1784,7 +1701,7 @@ func TestChangefeedSchemaChangeBackfillScope(t *testing.T) { `no_def_change: [3]->{"after": {"a": 3}}`, }) sqlDB.Exec(t, `ALTER TABLE add_column_def ADD COLUMN b STRING DEFAULT 'd'`) - ts := fetchDescVersionModificationTime(t, db, f, `add_column_def`, 4) + ts := fetchDescVersionModificationTime(t, s, `add_column_def`, 4) // Schema change backfill assertPayloadsStripTs(t, combinedFeed, []string{ `add_column_def: [1]->{"after": {"a": 1}}`, @@ -1801,13 +1718,7 @@ func TestChangefeedSchemaChangeBackfillScope(t *testing.T) { } - // TODO(ssd): tenant tests skipped because of f.Server() use - // in fetchDescVerionModifationTime - t.Run(`sinkless`, sinklessTest(testFn, feedTestNoTenants)) - t.Run(`enterprise`, enterpriseTest(testFn, feedTestNoTenants)) - t.Run(`kafka`, kafkaTest(testFn, feedTestNoTenants)) - t.Run(`webhook`, webhookTest(testFn, feedTestNoTenants)) - t.Run(`pubsub`, pubsubTest(testFn, feedTestNoTenants)) + cdcTestWithSystem(t, testFn) log.Flush() entries, err := log.FetchEntriesFromFiles(0, math.MaxInt64, 1, regexp.MustCompile("cdc ux violation"), log.WithFlattenedSensitiveData) @@ -1822,14 +1733,14 @@ func TestChangefeedSchemaChangeBackfillScope(t *testing.T) { // fetchDescVersionModificationTime fetches the `ModificationTime` of the specified // `version` of `tableName`'s table descriptor. 
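//
// A hedged usage sketch (the call shape is copied from the hunks above;
// the TestServerWithSystem type itself is defined in the test helpers and
// is not shown in this patch):
//
//	ts := fetchDescVersionModificationTime(t, s, `add_col_comp`, 4)
//	assertPayloads(t, addColComp, []string{
//		fmt.Sprintf(`add_col_comp: [1]->{"after": {"a": 1, "b": 6, "c": 11}, "updated": "%s"}`,
//			ts.AsOfSystemTime()),
//	})
//
// The old signature needed both a *gosql.DB and the feed factory; the new
// one takes only s, since s.DB answers the SQL lookup and s.SystemServer
// issues the raw ExportRequest below.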
func fetchDescVersionModificationTime( - t testing.TB, db *gosql.DB, f cdctest.TestFeedFactory, tableName string, version int, + t testing.TB, s TestServerWithSystem, tableName string, version int, ) hlc.Timestamp { - tblKey := keys.SystemSQLCodec.TablePrefix(keys.DescriptorTableID) + tblKey := s.Codec.TablePrefix(keys.DescriptorTableID) header := roachpb.RequestHeader{ Key: tblKey, EndKey: tblKey.PrefixEnd(), } - dropColTblID := sqlutils.QueryTableID(t, db, `d`, "public", tableName) + dropColTblID := sqlutils.QueryTableID(t, s.DB, `d`, "public", tableName) req := &roachpb.ExportRequest{ RequestHeader: header, MVCCFilter: roachpb.MVCCFilter_All, @@ -1839,7 +1750,7 @@ func fetchDescVersionModificationTime( clock := hlc.NewClockWithSystemTimeSource(time.Minute /* maxOffset */) hh := roachpb.Header{Timestamp: clock.Now()} res, pErr := kv.SendWrappedWith(context.Background(), - f.Server().DB().NonTransactionalSender(), hh, req) + s.SystemServer.DB().NonTransactionalSender(), hh, req) if pErr != nil { t.Fatal(pErr.GoError()) } @@ -1856,7 +1767,7 @@ func fetchDescVersionModificationTime( continue } k := it.UnsafeKey() - remaining, _, _, err := keys.SystemSQLCodec.DecodeIndexPrefix(k.Key) + remaining, _, _, err := s.Codec.DecodeIndexPrefix(k.Key) if err != nil { t.Fatal(err) } @@ -1892,8 +1803,8 @@ func TestChangefeedAfterSchemaChangeBackfill(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE after_backfill (a INT PRIMARY KEY)`) sqlDB.Exec(t, `INSERT INTO after_backfill VALUES (0)`) sqlDB.Exec(t, `ALTER TABLE after_backfill ADD COLUMN b INT DEFAULT 1`) @@ -1906,11 +1817,7 @@ func TestChangefeedAfterSchemaChangeBackfill(t *testing.T) { }) } - t.Run(`sinkless`, sinklessTest(testFn)) - t.Run(`enterprise`, enterpriseTest(testFn)) - t.Run(`kafka`, kafkaTest(testFn)) - t.Run(`webhook`, webhookTest(testFn)) - t.Run(`pubsub`, pubsubTest(testFn)) + cdcTest(t, testFn) log.Flush() entries, err := log.FetchEntriesFromFiles(0, math.MaxInt64, 1, regexp.MustCompile("cdc ux violation"), log.WithFlattenedSensitiveData) @@ -1926,9 +1833,9 @@ func TestChangefeedEachColumnFamily(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + sqlDB := sqlutils.MakeSQLRunner(s.DB) // Table with 2 column families. sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING, c STRING, FAMILY most (a,b), FAMILY only_c (c))`) @@ -1990,21 +1897,15 @@ func TestChangefeedEachColumnFamily(t *testing.T) { } } - t.Run(`sinkless`, sinklessTest(testFn)) - t.Run(`enterprise`, enterpriseTest(testFn)) - t.Run(`cloudstorage`, cloudStorageTest(testFn)) - t.Run(`kafka`, kafkaTest(testFn)) - t.Run(`webhook`, webhookTest(testFn)) - t.Run(`pubsub`, pubsubTest(testFn)) + cdcTest(t, testFn) } func TestChangefeedSingleColumnFamily(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) // Table with 2 column families. 
sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING, c STRING, FAMILY most (a,b), FAMILY rest (c))`) @@ -2036,10 +1937,7 @@ func TestChangefeedSingleColumnFamily(t *testing.T) { `foo.rest: [1]->{"after": {"c": "cent"}}`, }) } - - t.Run(`sinkless`, sinklessTest(testFn)) - t.Run(`enterprise`, enterpriseTest(testFn)) - t.Run(`kafka`, kafkaTest(testFn)) + cdcTest(t, testFn) } func TestChangefeedSingleColumnFamilySchemaChanges(t *testing.T) { @@ -2050,9 +1948,8 @@ func TestChangefeedSingleColumnFamilySchemaChanges(t *testing.T) { skip.UnderStress(t) skip.UnderRace(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) disableDeclarativeSchemaChangesForTest(t, sqlDB) // Table with 2 column families. @@ -2083,18 +1980,15 @@ func TestChangefeedSingleColumnFamilySchemaChanges(t *testing.T) { regexp.MustCompile(`CHANGEFEED targeting nonexistent or removed column family rest of table foo`)) } - - t.Run(`sinkless`, sinklessTest(testFn)) - t.Run(`enterprise`, enterpriseTest(testFn)) - t.Run(`kafka`, kafkaTest(testFn)) + cdcTest(t, testFn) } func TestChangefeedEachColumnFamilySchemaChanges(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) disableDeclarativeSchemaChangesForTest(t, sqlDB) // Table with 2 column families. @@ -2122,20 +2016,16 @@ func TestChangefeedEachColumnFamilySchemaChanges(t *testing.T) { assertPayloads(t, foo, []string{ `foo.f3: [0]->{"after": {"e": "hello"}}`, }) - } - - t.Run(`sinkless`, sinklessTest(testFn)) - t.Run(`enterprise`, enterpriseTest(testFn)) - t.Run(`kafka`, kafkaTest(testFn)) + cdcTest(t, testFn) } func TestChangefeedColumnFamilyAvro(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING, c STRING, FAMILY most (a,b), FAMILY justc (c))`) sqlDB.Exec(t, `INSERT INTO foo values (0, 'dog', 'cat')`) @@ -2145,9 +2035,8 @@ func TestChangefeedColumnFamilyAvro(t *testing.T) { `foo.most: {"a":{"long":0}}->{"after":{"foo_u002e_most":{"a":{"long":0},"b":{"string":"dog"}}}}`, `foo.justc: {"a":{"long":0}}->{"after":{"foo_u002e_justc":{"c":{"string":"cat"}}}}`, }) - } - t.Run(`kafka`, kafkaTest(testFn)) + cdcTest(t, testFn, feedTestForceSink("kafka")) } func TestChangefeedAuthorization(t *testing.T) { @@ -2171,9 +2060,9 @@ func TestChangefeedAuthorization(t *testing.T) { }, } { t.Run(tc.name, func(t *testing.T) { - s, db, stop := startTestServer(t, feedTestOptions{}) + s, stop := makeServer(t) defer stop() - rootDB := sqlutils.MakeSQLRunner(db) + rootDB := sqlutils.MakeSQLRunner(s.DB) rootDB.Exec(t, `create user guest with password 'password'`) rootDB.Exec(t, `create user feedcreator with controlchangefeed password 'hunter2'`) @@ -2181,7 +2070,7 @@ func TestChangefeedAuthorization(t *testing.T) { pgURL := url.URL{ Scheme: "postgres", User: url.UserPassword(`guest`, `password`), - Host: s.ServingSQLAddr(), + Host: s.Server.SQLAddr(), } db2, err := 
gosql.Open("postgres", pgURL.String()) @@ -2192,7 +2081,7 @@ func TestChangefeedAuthorization(t *testing.T) { pgURL = url.URL{ Scheme: "postgres", User: url.UserPassword(`feedcreator`, `hunter2`), - Host: s.ServingSQLAddr(), + Host: s.Server.SQLAddr(), } db3, err := gosql.Open("postgres", pgURL.String()) @@ -2229,25 +2118,25 @@ func TestChangefeedAvroNotice(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - s, db, stop := startTestServer(t, feedTestOptions{}) + s, stop := makeServer(t) defer stop() schemaReg := cdctest.StartTestSchemaRegistry() defer schemaReg.Close() - sqlDB := sqlutils.MakeSQLRunner(db) + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, "CREATE table foo (i int)") sqlDB.Exec(t, `INSERT INTO foo VALUES (0)`) sql := fmt.Sprintf("CREATE CHANGEFEED FOR d.foo INTO 'null://' WITH format=experimental_avro, confluent_schema_registry='%s'", schemaReg.URL()) - expectNotice(t, s, sql, `avro is no longer experimental, use format=avro`) + expectNotice(t, s.Server, sql, `avro is no longer experimental, use format=avro`) } func TestChangefeedOutputTopics(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - pgURL, cleanup := sqlutils.PGUrl(t, f.Server().ServingSQLAddr(), t.Name(), url.User(username.RootUser)) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + pgURL, cleanup := sqlutils.PGUrl(t, s.Server.SQLAddr(), t.Name(), url.User(username.RootUser)) defer cleanup() pgBase, err := pq.NewConnector(pgURL.String()) if err != nil { @@ -2296,8 +2185,7 @@ func TestChangefeedOutputTopics(t *testing.T) { sqlDB.Exec(t, `CREATE CHANGEFEED FOR ☃ INTO 'kafka://does.not.matter/'`) require.Equal(t, `changefeed will emit to topic _u2603_`, actual) } - - t.Run(`kafka`, kafkaTest(testFn)) + cdcTest(t, testFn, feedTestForceSink("kafka")) } func requireErrorSoon( @@ -2332,8 +2220,8 @@ func TestChangefeedFailOnTableOffline(t *testing.T) { })) defer dataSrv.Close() - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, "SET CLUSTER SETTING kv.closed_timestamp.target_duration = '50ms'") t.Run("import fails changefeed", func(t *testing.T) { sqlDB.Exec(t, `CREATE TABLE for_import (a INT PRIMARY KEY, b INT)`) @@ -2349,16 +2237,8 @@ func TestChangefeedFailOnTableOffline(t *testing.T) { regexp.MustCompile(`CHANGEFEED cannot target offline table: for_import \(offline reason: "importing"\)`)) }) } - // TODO(ssd): tenant tests skipped because of: - // changefeed_test.go:1409: error executing 'IMPORT INTO - // for_import CSV DATA ($1)': pq: fake protectedts.Provide - t.Run(`sinkless`, sinklessTest(testFn, feedTestNoTenants)) - t.Run(`enterprise`, enterpriseTest(testFn, feedTestNoTenants)) - t.Run(`cloudstorage`, cloudStorageTest(testFn, feedTestNoTenants)) - t.Run(`kafka`, kafkaTest(testFn, feedTestNoTenants)) - t.Run(`webhook`, webhookTest(testFn, feedTestNoTenants)) - t.Run(`pubsub`, pubsubTest(testFn, feedTestNoTenants)) + cdcTest(t, testFn) } func TestChangefeedRestartMultiNode(t *testing.T) { @@ -2468,8 +2348,8 @@ func TestChangefeedWorksOnRBRChange(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFnJSON := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFnJSON := func(t *testing.T, s 
TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, "SET CLUSTER SETTING kv.closed_timestamp.target_duration = '50ms'") t.Run("regional by row change works", func(t *testing.T) { sqlDB.Exec(t, `CREATE TABLE rbr (a INT PRIMARY KEY, b INT)`) @@ -2489,8 +2369,8 @@ func TestChangefeedWorksOnRBRChange(t *testing.T) { }) }) } - testFnAvro := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFnAvro := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, "SET CLUSTER SETTING kv.closed_timestamp.target_duration = '50ms'") t.Run("regional by row change works", func(t *testing.T) { sqlDB.Exec(t, `CREATE TABLE rbr (a INT PRIMARY KEY, b INT)`) @@ -2535,17 +2415,18 @@ func TestChangefeedWorksOnRBRChange(t *testing.T) { }) } - // Tenants skiped because of: + // Tenants skipped because of: // // error executing 'ALTER DATABASE d PRIMARY REGION // "us-east-1"': pq: get_live_cluster_regions: unimplemented: // operation is unsupported in multi-tenancy mode opts := []feedTestOption{ feedTestNoTenants, + feedTestEnterpriseSinks, withArgsFn(withTestServerRegion), } - RunRandomSinkTest(t, "format=json", testFnJSON, opts...) - t.Run("kafka/format=avro", kafkaTest(testFnAvro, opts...)) + cdcTestNamed(t, "format=json", testFnJSON, opts...) + cdcTestNamed(t, "format=avro", testFnAvro, append(opts, feedTestForceSink("kafka"))...) } func TestChangefeedRBRAvroAddRegion(t *testing.T) { @@ -2609,8 +2490,8 @@ func TestChangefeedStopOnSchemaChange(t *testing.T) { } } } - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) // Shorten the intervals so this test doesn't take so long. We need to wait // for timestamps to get resolved. sqlDB.Exec(t, "SET CLUSTER SETTING changefeed.experimental_poll_interval = '200ms'") @@ -2727,11 +2608,7 @@ func TestChangefeedStopOnSchemaChange(t *testing.T) { }) } - t.Run(`sinkless`, sinklessTest(testFn)) - t.Run(`enterprise`, enterpriseTest(testFn)) - t.Run(`kafka`, kafkaTest(testFn)) - t.Run(`webhook`, webhookTest(testFn)) - t.Run(`pubsub`, pubsubTest(testFn)) + cdcTest(t, testFn) } func TestChangefeedNoBackfill(t *testing.T) { @@ -2740,8 +2617,8 @@ func TestChangefeedNoBackfill(t *testing.T) { skip.UnderRace(t) skip.UnderShort(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) disableDeclarativeSchemaChangesForTest(t, sqlDB) // Shorten the intervals so this test doesn't take so long. We need to wait // for timestamps to get resolved. 
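The hunks above and below apply the same mechanical rewrite, so one hedged
sketch of the new shape may help review. Helper and field names are copied
from this diff; their definitions live in the test helpers, which this patch
does not show, so treat this as an illustration rather than the
authoritative API:

	// New-style test: testFn receives a TestServer bundle instead of a bare
	// *gosql.DB, and a single cdcTest call replaces the per-sink t.Run list.
	func TestChangefeedExampleShape(t *testing.T) {
		defer leaktest.AfterTest(t)()
		defer log.Scope(t).Close(t)

		testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) {
			sqlDB := sqlutils.MakeSQLRunner(s.DB) // SQL conn rides on the bundle
			sqlDB.Exec(t, `CREATE TABLE example (a INT PRIMARY KEY)`)
			sqlDB.Exec(t, `INSERT INTO example VALUES (1)`)
			feed := feed(t, f, `CREATE CHANGEFEED FOR example`)
			defer closeFeed(t, feed)
			assertPayloads(t, feed, []string{
				`example: [1]->{"after": {"a": 1}}`,
			})
		}

		// Runs testFn against every applicable sink. The options used in this
		// diff narrow the matrix:
		//   feedTestForceSink("kafka")  pins exactly one sink
		//   feedTestEnterpriseSinks     drops sinkless
		//   feedTestNoTenants           skips the tenant variant
		//   feedTestRestrictSinks(...)  allow-lists several sinks
		cdcTest(t, testFn)
	}

Tests that need system-tenant access (GC, stores, raw KV) use the parallel
cdcTestWithSystem / TestServerWithSystem pair instead, as the checkpoint and
TTL hunks elsewhere in this patch show.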
@@ -2847,19 +2724,15 @@ func TestChangefeedNoBackfill(t *testing.T) { }) } - t.Run(`sinkless`, sinklessTest(testFn)) - t.Run(`enterprise`, enterpriseTest(testFn)) - t.Run(`kafka`, kafkaTest(testFn)) - t.Run(`webhook`, webhookTest(testFn)) - t.Run(`pubsub`, pubsubTest(testFn)) + cdcTest(t, testFn) } func TestChangefeedStoredComputedColumn(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE cc ( a INT, b INT AS (a + 1) STORED, c INT AS (a + 2) STORED, PRIMARY KEY (b, a) )`) @@ -2878,11 +2751,7 @@ func TestChangefeedStoredComputedColumn(t *testing.T) { }) } - t.Run(`sinkless`, sinklessTest(testFn)) - t.Run(`enterprise`, enterpriseTest(testFn)) - t.Run(`kafka`, kafkaTest(testFn)) - t.Run(`webhook`, webhookTest(testFn)) - t.Run(`pubsub`, pubsubTest(testFn)) + cdcTest(t, testFn) } func TestChangefeedVirtualComputedColumn(t *testing.T) { @@ -2923,8 +2792,8 @@ func TestChangefeedVirtualComputedColumn(t *testing.T) { } for _, test := range tests { - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE cc ( a INT primary key, b INT, c INT AS (b + 1) VIRTUAL NOT NULL @@ -2945,12 +2814,10 @@ func TestChangefeedVirtualComputedColumn(t *testing.T) { } if test.formatOpt != changefeedbase.OptFormatAvro { - t.Run(`sinkless`, sinklessTest(testFn)) - t.Run(`enterprise`, enterpriseTest(testFn)) - t.Run(`webhook`, webhookTest(testFn)) + cdcTest(t, testFn) + } else { + cdcTest(t, testFn, feedTestForceSink("kafka")) } - - t.Run(`kafka`, kafkaTest(testFn)) } } @@ -2958,8 +2825,8 @@ func TestChangefeedUpdatePrimaryKey(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) // This NOT NULL column checks a regression when used with UPDATE-ing a // primary key column or with DELETE. 
sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING NOT NULL)`) @@ -2983,11 +2850,7 @@ func TestChangefeedUpdatePrimaryKey(t *testing.T) { }) } - t.Run(`sinkless`, sinklessTest(testFn)) - t.Run(`enterprise`, enterpriseTest(testFn)) - t.Run(`kafka`, kafkaTest(testFn)) - t.Run(`webhook`, webhookTest(testFn)) - t.Run(`pubsub`, pubsubTest(testFn)) + cdcTest(t, testFn) } func TestChangefeedTruncateOrDrop(t *testing.T) { @@ -3010,9 +2873,9 @@ func TestChangefeedTruncateOrDrop(t *testing.T) { }) } - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) - registry := f.Server().JobRegistry().(*jobs.Registry) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) + registry := s.Server.JobRegistry().(*jobs.Registry) metrics := registry.MetricsStruct().Changefeed.(*Metrics) drainUntilErr := func(f cdctest.TestFeed) (err error) { @@ -3055,11 +2918,7 @@ func TestChangefeedTruncateOrDrop(t *testing.T) { assertFailuresCounter(t, metrics, 3) } - t.Run(`sinkless`, sinklessTest(testFn)) - t.Run(`enterprise`, enterpriseTest(testFn)) - t.Run(`kafka`, kafkaTest(testFn)) - t.Run(`webhook`, webhookTest(testFn)) - t.Run(`pubsub`, pubsubTest(testFn)) + cdcTest(t, testFn, feedTestEnterpriseSinks) // will sometimes fail, non deterministic } @@ -3067,34 +2926,33 @@ func TestChangefeedMonitoring(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY)`) sqlDB.Exec(t, `INSERT INTO foo VALUES (1)`) - s := f.Server() - if c := s.MustGetSQLCounter(`changefeed.emitted_messages`); c != 0 { + if c := s.Server.MustGetSQLCounter(`changefeed.emitted_messages`); c != 0 { t.Errorf(`expected 0 got %d`, c) } - if c := s.MustGetSQLCounter(`changefeed.emitted_bytes`); c != 0 { + if c := s.Server.MustGetSQLCounter(`changefeed.emitted_bytes`); c != 0 { t.Errorf(`expected 0 got %d`, c) } - if c := s.MustGetSQLCounter(`changefeed.flushed_bytes`); c != 0 { + if c := s.Server.MustGetSQLCounter(`changefeed.flushed_bytes`); c != 0 { t.Errorf(`expected 0 got %d`, c) } - if c := s.MustGetSQLCounter(`changefeed.flushes`); c != 0 { + if c := s.Server.MustGetSQLCounter(`changefeed.flushes`); c != 0 { t.Errorf(`expected 0 got %d`, c) } - if c := s.MustGetSQLCounter(`changefeed.max_behind_nanos`); c != 0 { + if c := s.Server.MustGetSQLCounter(`changefeed.max_behind_nanos`); c != 0 { t.Errorf(`expected %d got %d`, 0, c) } - if c := s.MustGetSQLCounter(`changefeed.buffer_entries.in`); c != 0 { + if c := s.Server.MustGetSQLCounter(`changefeed.buffer_entries.in`); c != 0 { t.Errorf(`expected 0 got %d`, c) } - if c := s.MustGetSQLCounter(`changefeed.buffer_entries.out`); c != 0 { + if c := s.Server.MustGetSQLCounter(`changefeed.buffer_entries.out`); c != 0 { t.Errorf(`expected 0 got %d`, c) } - if c := s.MustGetSQLCounter(`changefeed.table_metadata_nanos`); c != 0 { + if c := s.Server.MustGetSQLCounter(`changefeed.table_metadata_nanos`); c != 0 { t.Errorf(`expected 0 got %d`, c) } @@ -3110,28 +2968,28 @@ func TestChangefeedMonitoring(t *testing.T) { require.NoError(t, err) testutils.SucceedsSoon(t, func() error { - if c := s.MustGetSQLCounter(`changefeed.emitted_messages`); c != 1 { + if c := 
s.Server.MustGetSQLCounter(`changefeed.emitted_messages`); c != 1 { return errors.Errorf(`expected 1 got %d`, c) } - if c := s.MustGetSQLCounter(`changefeed.emitted_bytes`); c != 22 { + if c := s.Server.MustGetSQLCounter(`changefeed.emitted_bytes`); c != 22 { return errors.Errorf(`expected 22 got %d`, c) } - if c := s.MustGetSQLCounter(`changefeed.flushed_bytes`); c != 22 { + if c := s.Server.MustGetSQLCounter(`changefeed.flushed_bytes`); c != 22 { return errors.Errorf(`expected 22 got %d`, c) } - if c := s.MustGetSQLCounter(`changefeed.flushes`); c <= 0 { + if c := s.Server.MustGetSQLCounter(`changefeed.flushes`); c <= 0 { return errors.Errorf(`expected > 0 got %d`, c) } - if c := s.MustGetSQLCounter(`changefeed.running`); c != 1 { + if c := s.Server.MustGetSQLCounter(`changefeed.running`); c != 1 { return errors.Errorf(`expected 1 got %d`, c) } - if c := s.MustGetSQLCounter(`changefeed.max_behind_nanos`); c <= 0 { + if c := s.Server.MustGetSQLCounter(`changefeed.max_behind_nanos`); c <= 0 { return errors.Errorf(`expected > 0 got %d`, c) } - if c := s.MustGetSQLCounter(`changefeed.buffer_entries.in`); c <= 0 { + if c := s.Server.MustGetSQLCounter(`changefeed.buffer_entries.in`); c <= 0 { return errors.Errorf(`expected > 0 got %d`, c) } - if c := s.MustGetSQLCounter(`changefeed.buffer_entries.out`); c <= 0 { + if c := s.Server.MustGetSQLCounter(`changefeed.buffer_entries.out`); c <= 0 { return errors.Errorf(`expected > 0 got %d`, c) } return nil @@ -3148,10 +3006,10 @@ func TestChangefeedMonitoring(t *testing.T) { testutils.SucceedsSoon(t, func() error { // We can't assert exactly 4 or 88 in case we get (allowed) duplicates // from RangeFeed. - if c := s.MustGetSQLCounter(`changefeed.emitted_messages`); c < 4 { + if c := s.Server.MustGetSQLCounter(`changefeed.emitted_messages`); c < 4 { return errors.Errorf(`expected >= 4 got %d`, c) } - if c := s.MustGetSQLCounter(`changefeed.emitted_bytes`); c < 88 { + if c := s.Server.MustGetSQLCounter(`changefeed.emitted_bytes`); c < 88 { return errors.Errorf(`expected >= 88 got %d`, c) } return nil @@ -3162,17 +3020,17 @@ func TestChangefeedMonitoring(t *testing.T) { require.NoError(t, foo.Close()) require.NoError(t, fooCopy.Close()) testutils.SucceedsSoon(t, func() error { - if c := s.MustGetSQLCounter(`changefeed.max_behind_nanos`); c != 0 { + if c := s.Server.MustGetSQLCounter(`changefeed.max_behind_nanos`); c != 0 { return errors.Errorf(`expected 0 got %d`, c) } - if c := s.MustGetSQLCounter(`changefeed.running`); c != 0 { + if c := s.Server.MustGetSQLCounter(`changefeed.running`); c != 0 { return errors.Errorf(`expected 0 got %d`, c) } return nil }) } - // TODO(ssd): tenant tests skipped because of f.Server() use - t.Run(`sinkless`, sinklessTest(testFn, feedTestNoTenants)) + + cdcTest(t, testFn, feedTestForceSink("sinkless")) } func TestChangefeedRetryableError(t *testing.T) { @@ -3180,8 +3038,8 @@ func TestChangefeedRetryableError(t *testing.T) { defer log.Scope(t).Close(t) defer utilccl.TestingEnableEnterprise()() - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - knobs := f.Server().TestingKnobs(). + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + knobs := s.TestingKnobs. DistSQL.(*execinfra.TestingKnobs). Changefeed.(*TestingKnobs) var failEmit int64 @@ -3197,7 +3055,7 @@ func TestChangefeedRetryableError(t *testing.T) { } // Set up a new feed and verify that the sink is started up. 
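//
// (A hedged aside on the knobs hookup a few lines up: the spelling
//
//	knobs := s.TestingKnobs.
//		DistSQL.(*execinfra.TestingKnobs).
//		Changefeed.(*TestingKnobs)
//
// is the recurring replacement for the old f.Server().TestingKnobs() chain;
// the knobs now ride on the TestServer bundle instead of being fished back
// out of the feed factory's server.)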
- sqlDB := sqlutils.MakeSQLRunner(db) + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY)`) foo := feed(t, f, `CREATE CHANGEFEED FOR foo`) defer closeFeed(t, foo) @@ -3210,7 +3068,7 @@ func TestChangefeedRetryableError(t *testing.T) { // sink is failing requests. atomic.StoreInt64(&failEmit, 1) sqlDB.Exec(t, `INSERT INTO foo VALUES (2)`) - registry := f.Server().JobRegistry().(*jobs.Registry) + registry := s.Server.JobRegistry().(*jobs.Registry) sli, err := registry.MetricsStruct().Changefeed.(*Metrics).getSLIMetrics(defaultSLIScope) require.NoError(t, err) @@ -3257,11 +3115,7 @@ func TestChangefeedRetryableError(t *testing.T) { } } - t.Run(`enterprise`, enterpriseTest(testFn)) - t.Run(`cloudstorage`, cloudStorageTest(testFn)) - t.Run(`kafka`, kafkaTest(testFn)) - t.Run(`webhook`, webhookTest(testFn)) - t.Run(`pubsub`, pubsubTest(testFn)) + cdcTest(t, testFn, feedTestEnterpriseSinks) } func TestChangefeedJobRetryOnNoInboundStream(t *testing.T) { @@ -3296,7 +3150,7 @@ func TestChangefeedJobRetryOnNoInboundStream(t *testing.T) { defer closeFeed(t, foo) // Verify job progress contains retryable error status. - registry := f.Server().JobRegistry().(*jobs.Registry) + registry := cluster.Server(feedServerID).JobRegistry().(*jobs.Registry) jobID := foo.(cdctest.EnterpriseTestFeed).JobID() testutils.SucceedsSoon(t, func() error { job, err := registry.LoadJob(context.Background(), jobID) @@ -3336,15 +3190,15 @@ func TestChangefeedJobUpdateFailsIfNotClaimed(t *testing.T) { // undo our deletion of the claim ID below. knobs.JobsTestingKnobs.(*jobs.TestingKnobs).IntervalOverrides.Adopt = &adoptionInterval }) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - knobs := f.Server().TestingKnobs().DistSQL.(*execinfra.TestingKnobs).Changefeed.(*TestingKnobs) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + knobs := s.TestingKnobs.DistSQL.(*execinfra.TestingKnobs).Changefeed.(*TestingKnobs) errChan := make(chan error, 1) knobs.HandleDistChangefeedError = func(err error) error { errChan <- err return err } - sqlDB := sqlutils.MakeSQLRunner(db) + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b INT)`) sqlDB.Exec(t, `INSERT INTO foo (a, b) VALUES (1, 1)`) @@ -3371,9 +3225,10 @@ func TestChangefeedJobUpdateFailsIfNotClaimed(t *testing.T) { case <-time.After(5 * time.Second): t.Fatal("expected distflow to fail but it hasn't after 5 seconds") } - } - RunRandomSinkTest(t, "fails as expected", testFn, feedTestNoTenants, sessionOverride) + + // TODO: Figure out why this freezes on tenants + cdcTest(t, testFn, sessionOverride, feedTestNoTenants, feedTestEnterpriseSinks) } // TestChangefeedDataTTL ensures that changefeeds fail with an error in the case @@ -3382,13 +3237,13 @@ func TestChangefeedDataTTL(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { + testFn := func(t *testing.T, s TestServerWithSystem, f cdctest.TestFeedFactory) { // Set a very simple channel-based, wait-and-resume function as the // BeforeEmitRow hook. var shouldWait int32 wait := make(chan struct{}) resume := make(chan struct{}) - knobs := f.Server().TestingKnobs(). + knobs := s.TestingKnobs. DistSQL.(*execinfra.TestingKnobs). 
Changefeed.(*TestingKnobs) knobs.FeedKnobs.BeforeScanRequest = func(_ *kv.Batch) error { @@ -3400,7 +3255,7 @@ func TestChangefeedDataTTL(t *testing.T) { return nil } - sqlDB := sqlutils.MakeSQLRunner(db) + sqlDB := sqlutils.MakeSQLRunner(s.DB) // Create the data table; it will only contain a // single row with multiple versions. @@ -3456,7 +3311,7 @@ func TestChangefeedDataTTL(t *testing.T) { // Force a GC of the table. This should cause both // versions of the table to be deleted. - forceTableGC(t, f.Server(), sqlDB, "d", "foo") + forceTableGC(t, s.SystemServer, sqlDB, "d", "foo") // Resume our changefeed normally. atomic.StoreInt32(&shouldWait, 0) @@ -3495,10 +3350,9 @@ func TestChangefeedDataTTL(t *testing.T) { // NOTE(ssd): This test doesn't apply to enterprise // changefeeds since enterprise changefeeds create a protected // timestamp before beginning their backfill. - // - // TODO(ssd): Tenant test disabled because this test requires - // the fully TestServerInterface. - t.Run("sinkless", sinklessTest(testFn, feedTestNoTenants)) + // TODO(samiskin): Tenant test disabled because this test requires + // forceTableGC which doesn't work on tenants + cdcTestWithSystem(t, testFn, feedTestForceSink("sinkless"), feedTestNoTenants) } // TestChangefeedSchemaTTL ensures that changefeeds fail with an error in the case @@ -3507,13 +3361,13 @@ func TestChangefeedSchemaTTL(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { + testFn := func(t *testing.T, s TestServerWithSystem, f cdctest.TestFeedFactory) { // Set a very simple channel-based, wait-and-resume function as the // BeforeEmitRow hook. var shouldWait int32 wait := make(chan struct{}) resume := make(chan struct{}) - knobs := f.Server().TestingKnobs(). + knobs := s.TestingKnobs. DistSQL.(*execinfra.TestingKnobs). Changefeed.(*TestingKnobs) knobs.BeforeEmitRow = func(_ context.Context) error { @@ -3525,7 +3379,7 @@ func TestChangefeedSchemaTTL(t *testing.T) { return nil } - sqlDB := sqlutils.MakeSQLRunner(db) + sqlDB := sqlutils.MakeSQLRunner(s.DB) // Create the data table; it will only contain a single row with multiple // versions. @@ -3558,7 +3412,7 @@ func TestChangefeedSchemaTTL(t *testing.T) { // Force a GC of the table. This should cause both older versions of the // table to be deleted, with the middle version being lost to the changefeed. - forceTableGC(t, f.Server(), sqlDB, "system", "descriptor") + forceTableGC(t, s.SystemServer, sqlDB, "system", "descriptor") // Resume our changefeed normally. atomic.StoreInt32(&shouldWait, 0) @@ -3577,14 +3431,10 @@ func TestChangefeedSchemaTTL(t *testing.T) { } } - // TODO(ssd): tenant tests skipped because of f.Server() use - // in forceTableGC - t.Run("sinkless", sinklessTest(testFn, feedTestNoTenants)) - t.Run("enterprise", enterpriseTest(testFn, feedTestNoTenants)) - t.Run("cloudstorage", cloudStorageTest(testFn, feedTestNoTenants)) - t.Run("kafka", kafkaTest(testFn, feedTestNoTenants)) - t.Run(`webhook`, webhookTest(testFn, feedTestNoTenants)) - t.Run(`pubsub`, pubsubTest(testFn, feedTestNoTenants)) + + // TODO(samiskin): tenant tests skipped because of forceTableGC not working + // with a TestTenantInterface + cdcTestWithSystem(t, testFn, feedTestNoTenants) } func TestChangefeedErrors(t *testing.T) { @@ -4159,14 +4009,14 @@ func TestChangefeedDescription(t *testing.T) { // Intentionally don't use the TestFeedFactory because we want to // control the placeholders. 
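//
// (A hedged sketch of makeServer, as used in the hunk below and in
// TestMissingTableErr above; it appears to bundle what startTestServer
// returned as separate values:
//
//	s, stopServer := makeServer(t)
//	defer stopServer()
//	sqlDB := sqlutils.MakeSQLRunner(s.DB)
//
// and s.Server.SQLAddr() stands in for the old ServingSQLAddr(), consistent
// with s.Server being the narrower tenant-capable interface this patch
// migrates toward.)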
- s, db, stopServer := startTestServer(t, feedTestOptions{}) + s, stopServer := makeServer(t) defer stopServer() - sqlDB := sqlutils.MakeSQLRunner(db) + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY)`) sqlDB.Exec(t, `INSERT INTO foo VALUES (1)`) - sink, cleanup := sqlutils.PGUrl(t, s.ServingSQLAddr(), t.Name(), url.User(username.RootUser)) + sink, cleanup := sqlutils.PGUrl(t, s.Server.SQLAddr(), t.Name(), url.User(username.RootUser)) defer cleanup() sink.Scheme = changefeedbase.SinkSchemeExperimentalSQL sink.Path = `d` @@ -4190,8 +4040,8 @@ func TestChangefeedPauseUnpause(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING)`) sqlDB.Exec(t, `INSERT INTO foo VALUES (1, 'a'), (2, 'b'), (4, 'c'), (7, 'd'), (8, 'e')`) @@ -4241,11 +4091,7 @@ func TestChangefeedPauseUnpause(t *testing.T) { }) } - t.Run(`enterprise`, enterpriseTest(testFn)) - t.Run(`cloudstorage`, cloudStorageTest(testFn)) - t.Run(`kafka`, kafkaTest(testFn)) - t.Run(`webhook`, webhookTest(testFn)) - t.Run(`pubsub`, pubsubTest(testFn)) + cdcTest(t, testFn, feedTestEnterpriseSinks) } func TestChangefeedPauseUnpauseCursorAndInitialScan(t *testing.T) { @@ -4253,8 +4099,8 @@ func TestChangefeedPauseUnpauseCursorAndInitialScan(t *testing.T) { defer log.Scope(t).Close(t) skip.UnderRaceWithIssue(t, 67565) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING)`) sqlDB.Exec(t, `INSERT INTO foo VALUES (1, 'a'), (2, 'b'), (4, 'c'), (7, 'd'), (8, 'e')`) @@ -4288,24 +4134,20 @@ func TestChangefeedPauseUnpauseCursorAndInitialScan(t *testing.T) { }) } - t.Run(`enterprise`, enterpriseTest(testFn)) - t.Run(`cloudstorage`, cloudStorageTest(testFn)) - t.Run(`kafka`, kafkaTest(testFn)) - t.Run(`webhook`, webhookTest(testFn)) - t.Run(`pubsub`, pubsubTest(testFn)) + cdcTest(t, testFn, feedTestEnterpriseSinks) } func TestChangefeedUpdateProtectedTimestamp(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { + testFn := func(t *testing.T, s TestServerWithSystem, f cdctest.TestFeedFactory) { ctx := context.Background() ptsInterval := 50 * time.Millisecond changefeedbase.ProtectTimestampInterval.Override( - context.Background(), &f.Server().ClusterSettings().SV, ptsInterval) + context.Background(), &s.Server.ClusterSettings().SV, ptsInterval) - sqlDB := sqlutils.MakeSQLRunner(db) + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, "SET CLUSTER SETTING kv.protectedts.poll_interval = '10ms';") sqlDB.Exec(t, "SET CLUSTER SETTING kv.closed_timestamp.target_duration = '100ms'") // speeds up the test sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY)`) @@ -4313,10 +4155,10 @@ func TestChangefeedUpdateProtectedTimestamp(t *testing.T) { defer closeFeed(t, foo) fooDesc := desctestutils.TestingGetPublicTableDescriptor( - f.Server().DB(), keys.SystemSQLCodec, "d", "foo") + s.SystemServer.DB(), s.Codec, "d", "foo") - ptp := 
f.Server().DistSQLServer().(*distsql.ServerImpl).ServerConfig.ProtectedTimestampProvider - store, err := f.Server().GetStores().(*kvserver.Stores).GetStore(f.Server().GetFirstStoreID()) + ptp := s.Server.DistSQLServer().(*distsql.ServerImpl).ServerConfig.ProtectedTimestampProvider + store, err := s.SystemServer.GetStores().(*kvserver.Stores).GetStore(s.SystemServer.GetFirstStoreID()) require.NoError(t, err) ptsReader := store.GetStoreConfig().ProtectedTimestampReader @@ -4332,7 +4174,7 @@ func TestChangefeedUpdateProtectedTimestamp(t *testing.T) { } mkGetProtections := func(t *testing.T, ptp protectedts.Provider, - srv serverutils.TestServerInterface, ptsReader spanconfig.ProtectedTSReader, + srv serverutils.TestTenantInterface, ptsReader spanconfig.ProtectedTSReader, span roachpb.Span) func() []hlc.Timestamp { return func() (r []hlc.Timestamp) { require.NoError(t, @@ -4352,19 +4194,19 @@ func TestChangefeedUpdateProtectedTimestamp(t *testing.T) { } // Setup helpers on the system.descriptors table. - descriptorTableKey := keys.SystemSQLCodec.TablePrefix(keys.DescriptorTableID) + descriptorTableKey := s.Codec.TablePrefix(keys.DescriptorTableID) descriptorTableSpan := roachpb.Span{ Key: descriptorTableKey, EndKey: descriptorTableKey.PrefixEnd(), } - getDescriptorTableProtection := mkGetProtections(t, ptp, f.Server(), ptsReader, + getDescriptorTableProtection := mkGetProtections(t, ptp, s.Server, ptsReader, descriptorTableSpan) // Setup helpers on the user table. - tableKey := keys.SystemSQLCodec.TablePrefix(uint32(fooDesc.GetID())) + tableKey := s.Codec.TablePrefix(uint32(fooDesc.GetID())) tableSpan := roachpb.Span{ Key: tableKey, EndKey: tableKey.PrefixEnd(), } - getTableProtection := mkGetProtections(t, ptp, f.Server(), ptsReader, tableSpan) + getTableProtection := mkGetProtections(t, ptp, s.Server, ptsReader, tableSpan) waitForProtectionAdvanced := func(ts hlc.Timestamp, getProtection func() []hlc.Timestamp) { check := func(protections []hlc.Timestamp) error { if len(protections) == 0 { @@ -4390,9 +4232,7 @@ func TestChangefeedUpdateProtectedTimestamp(t *testing.T) { } } - t.Run(`enterprise`, enterpriseTest(testFn, feedTestNoTenants)) - t.Run(`kafka`, kafkaTest(testFn, feedTestNoTenants)) - t.Run(`webhook`, webhookTest(testFn, feedTestNoTenants)) + cdcTestWithSystem(t, testFn, feedTestEnterpriseSinks) } func TestChangefeedProtectedTimestamps(t *testing.T) { @@ -4444,7 +4284,7 @@ func TestChangefeedProtectedTimestamps(t *testing.T) { return nil }) mkGetProtections = func(t *testing.T, ptp protectedts.Provider, - srv serverutils.TestServerInterface, ptsReader spanconfig.ProtectedTSReader, + srv serverutils.TestTenantInterface, ptsReader spanconfig.ProtectedTSReader, span roachpb.Span) func() []hlc.Timestamp { return func() (r []hlc.Timestamp) { require.NoError(t, @@ -4475,124 +4315,123 @@ func TestChangefeedProtectedTimestamps(t *testing.T) { } ) - t.Run(`enterprise`, enterpriseTest( - func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - defer close(done) - sqlDB := sqlutils.MakeSQLRunner(db) - sqlDB.Exec(t, `SET CLUSTER SETTING kv.protectedts.poll_interval = '10ms';`) - sqlDB.Exec(t, `SET CLUSTER SETTING kv.closed_timestamp.target_duration = '100ms';`) - sqlDB.Exec(t, `ALTER RANGE default CONFIGURE ZONE USING gc.ttlseconds = 100`) - sqlDB.Exec(t, `ALTER RANGE system CONFIGURE ZONE USING gc.ttlseconds = 100`) - sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING)`) - sqlDB.Exec(t, `INSERT INTO foo VALUES (1, 'a'), (2, 'b'), (4, 'c'), (7, 'd'), (8, 'e')`) + testFn := 
func(t *testing.T, s TestServerWithSystem, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) + sqlDB.Exec(t, `SET CLUSTER SETTING kv.protectedts.poll_interval = '10ms';`) + sqlDB.Exec(t, `SET CLUSTER SETTING kv.closed_timestamp.target_duration = '100ms';`) + sqlDB.Exec(t, `ALTER RANGE default CONFIGURE ZONE USING gc.ttlseconds = 100`) + sqlDB.Exec(t, `ALTER RANGE system CONFIGURE ZONE USING gc.ttlseconds = 100`) + sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING)`) + sqlDB.Exec(t, `INSERT INTO foo VALUES (1, 'a'), (2, 'b'), (4, 'c'), (7, 'd'), (8, 'e')`) - var tableID int - sqlDB.QueryRow(t, `SELECT table_id FROM crdb_internal.tables `+ - `WHERE name = 'foo' AND database_name = current_database()`). - Scan(&tableID) + var tableID int + sqlDB.QueryRow(t, `SELECT table_id FROM crdb_internal.tables `+ + `WHERE name = 'foo' AND database_name = current_database()`). + Scan(&tableID) - changefeedbase.ProtectTimestampInterval.Override( - context.Background(), &f.Server().ClusterSettings().SV, 100*time.Millisecond) + changefeedbase.ProtectTimestampInterval.Override( + context.Background(), &s.Server.ClusterSettings().SV, 100*time.Millisecond) - ptp := f.Server().DistSQLServer().(*distsql.ServerImpl).ServerConfig.ProtectedTimestampProvider - store, err := f.Server().GetStores().(*kvserver.Stores).GetStore(f.Server().GetFirstStoreID()) - require.NoError(t, err) - ptsReader := store.GetStoreConfig().ProtectedTimestampReader + ptp := s.Server.DistSQLServer().(*distsql.ServerImpl).ServerConfig.ProtectedTimestampProvider + store, err := s.SystemServer.GetStores().(*kvserver.Stores).GetStore(s.SystemServer.GetFirstStoreID()) + require.NoError(t, err) + ptsReader := store.GetStoreConfig().ProtectedTimestampReader - // Setup helpers on the system.descriptors table. - descriptorTableKey := keys.SystemSQLCodec.TablePrefix(keys.DescriptorTableID) - descriptorTableSpan := roachpb.Span{ - Key: descriptorTableKey, EndKey: descriptorTableKey.PrefixEnd(), - } - getDescriptorTableProtection := mkGetProtections(t, ptp, f.Server(), ptsReader, - descriptorTableSpan) - waitForDescriptorTableProtection := mkWaitForProtectionCond(t, getDescriptorTableProtection, - checkProtection) - waitForNoDescriptorTableProtection := mkWaitForProtectionCond(t, getDescriptorTableProtection, - checkNoProtection) - - // Setup helpers on the user table. - tableKey := keys.SystemSQLCodec.TablePrefix(uint32(tableID)) - tableSpan := roachpb.Span{ - Key: tableKey, EndKey: tableKey.PrefixEnd(), - } - getTableProtection := mkGetProtections(t, ptp, f.Server(), ptsReader, tableSpan) - waitForTableProtection := mkWaitForProtectionCond(t, getTableProtection, checkProtection) - waitForNoTableProtection := mkWaitForProtectionCond(t, getTableProtection, checkNoProtection) - waitForBlocked := requestBlockedScan() - waitForProtectionAdvanced := func(ts hlc.Timestamp, getProtection func() []hlc.Timestamp) { - check := func(protections []hlc.Timestamp) error { - if len(protections) != 0 { - for _, p := range protections { - if p.LessEq(ts) { - return errors.Errorf("expected protected timestamp to exceed %v, found %v", ts, p) - } + // Setup helpers on the system.descriptors table. 
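// (A hedged reading: s.Codec replaces the hard-coded keys.SystemSQLCodec in
// this and the sibling hunks, so key construction such as
//
//	descriptorTableKey := s.Codec.TablePrefix(keys.DescriptorTableID)
//
// would stay correct under a tenant's prefixed keyspace, even though
// feedTestNoTenants still applies to this test below.)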
+ descriptorTableKey := s.Codec.TablePrefix(keys.DescriptorTableID) + descriptorTableSpan := roachpb.Span{ + Key: descriptorTableKey, EndKey: descriptorTableKey.PrefixEnd(), + } + getDescriptorTableProtection := mkGetProtections(t, ptp, s.Server, ptsReader, + descriptorTableSpan) + waitForDescriptorTableProtection := mkWaitForProtectionCond(t, getDescriptorTableProtection, + checkProtection) + waitForNoDescriptorTableProtection := mkWaitForProtectionCond(t, getDescriptorTableProtection, + checkNoProtection) + + // Setup helpers on the user table. + tableKey := s.Codec.TablePrefix(uint32(tableID)) + tableSpan := roachpb.Span{ + Key: tableKey, EndKey: tableKey.PrefixEnd(), + } + getTableProtection := mkGetProtections(t, ptp, s.Server, ptsReader, tableSpan) + waitForTableProtection := mkWaitForProtectionCond(t, getTableProtection, checkProtection) + waitForNoTableProtection := mkWaitForProtectionCond(t, getTableProtection, checkNoProtection) + waitForBlocked := requestBlockedScan() + waitForProtectionAdvanced := func(ts hlc.Timestamp, getProtection func() []hlc.Timestamp) { + check := func(protections []hlc.Timestamp) error { + if len(protections) != 0 { + for _, p := range protections { + if p.LessEq(ts) { + return errors.Errorf("expected protected timestamp to exceed %v, found %v", ts, p) } } - return nil } - - mkWaitForProtectionCond(t, getProtection, check)() + return nil } - foo := feed(t, f, `CREATE CHANGEFEED FOR foo WITH resolved`) - defer closeFeed(t, foo) - { - // Ensure that there's a protected timestamp on startup that goes - // away after the initial scan. - unblock := waitForBlocked() - waitForTableProtection() - unblock() - assertPayloads(t, foo, []string{ - `foo: [1]->{"after": {"a": 1, "b": "a"}}`, - `foo: [2]->{"after": {"a": 2, "b": "b"}}`, - `foo: [4]->{"after": {"a": 4, "b": "c"}}`, - `foo: [7]->{"after": {"a": 7, "b": "d"}}`, - `foo: [8]->{"after": {"a": 8, "b": "e"}}`, - }) - resolved, _ := expectResolvedTimestamp(t, foo) - waitForProtectionAdvanced(resolved, getTableProtection) - } + mkWaitForProtectionCond(t, getProtection, check)() + } - { - // Ensure that a protected timestamp is created for a backfill due - // to a schema change and removed after. - waitForBlocked = requestBlockedScan() - sqlDB.Exec(t, `ALTER TABLE foo ADD COLUMN c INT NOT NULL DEFAULT 1`) - unblock := waitForBlocked() - waitForTableProtection() - waitForDescriptorTableProtection() - unblock() - assertPayloads(t, foo, []string{ - `foo: [1]->{"after": {"a": 1, "b": "a", "c": 1}}`, - `foo: [2]->{"after": {"a": 2, "b": "b", "c": 1}}`, - `foo: [4]->{"after": {"a": 4, "b": "c", "c": 1}}`, - `foo: [7]->{"after": {"a": 7, "b": "d", "c": 1}}`, - `foo: [8]->{"after": {"a": 8, "b": "e", "c": 1}}`, - }) - resolved, _ := expectResolvedTimestamp(t, foo) - waitForProtectionAdvanced(resolved, getTableProtection) - waitForProtectionAdvanced(resolved, getDescriptorTableProtection) - } + foo := feed(t, f, `CREATE CHANGEFEED FOR foo WITH resolved`) + defer closeFeed(t, foo) + { + // Ensure that there's a protected timestamp on startup that goes + // away after the initial scan. 
+ unblock := waitForBlocked() + waitForTableProtection() + unblock() + assertPayloads(t, foo, []string{ + `foo: [1]->{"after": {"a": 1, "b": "a"}}`, + `foo: [2]->{"after": {"a": 2, "b": "b"}}`, + `foo: [4]->{"after": {"a": 4, "b": "c"}}`, + `foo: [7]->{"after": {"a": 7, "b": "d"}}`, + `foo: [8]->{"after": {"a": 8, "b": "e"}}`, + }) + resolved, _ := expectResolvedTimestamp(t, foo) + waitForProtectionAdvanced(resolved, getTableProtection) + } - { - // Ensure that the protected timestamp is removed when the job is - // canceled. - waitForBlocked = requestBlockedScan() - sqlDB.Exec(t, `ALTER TABLE foo ADD COLUMN d INT NOT NULL DEFAULT 2`) - _ = waitForBlocked() - waitForTableProtection() - waitForDescriptorTableProtection() - sqlDB.Exec(t, `CANCEL JOB $1`, foo.(cdctest.EnterpriseTestFeed).JobID()) - waitForNoTableProtection() - waitForNoDescriptorTableProtection() - } - }, feedTestNoTenants, withArgsFn(func(args *base.TestServerArgs) { - storeKnobs := &kvserver.StoreTestingKnobs{} - storeKnobs.TestingRequestFilter = requestFilter - args.Knobs.Store = storeKnobs - }, - ))) + { + // Ensure that a protected timestamp is created for a backfill due + // to a schema change and removed after. + waitForBlocked = requestBlockedScan() + sqlDB.Exec(t, `ALTER TABLE foo ADD COLUMN c INT NOT NULL DEFAULT 1`) + unblock := waitForBlocked() + waitForTableProtection() + waitForDescriptorTableProtection() + unblock() + assertPayloads(t, foo, []string{ + `foo: [1]->{"after": {"a": 1, "b": "a", "c": 1}}`, + `foo: [2]->{"after": {"a": 2, "b": "b", "c": 1}}`, + `foo: [4]->{"after": {"a": 4, "b": "c", "c": 1}}`, + `foo: [7]->{"after": {"a": 7, "b": "d", "c": 1}}`, + `foo: [8]->{"after": {"a": 8, "b": "e", "c": 1}}`, + }) + resolved, _ := expectResolvedTimestamp(t, foo) + waitForProtectionAdvanced(resolved, getTableProtection) + waitForProtectionAdvanced(resolved, getDescriptorTableProtection) + } + + { + // Ensure that the protected timestamp is removed when the job is + // canceled. + waitForBlocked = requestBlockedScan() + sqlDB.Exec(t, `ALTER TABLE foo ADD COLUMN d INT NOT NULL DEFAULT 2`) + _ = waitForBlocked() + waitForTableProtection() + waitForDescriptorTableProtection() + sqlDB.Exec(t, `CANCEL JOB $1`, foo.(cdctest.EnterpriseTestFeed).JobID()) + waitForNoTableProtection() + waitForNoDescriptorTableProtection() + } + } + + cdcTestWithSystem(t, testFn, feedTestNoTenants, feedTestEnterpriseSinks, withArgsFn(func(args *base.TestServerArgs) { + storeKnobs := &kvserver.StoreTestingKnobs{} + storeKnobs.TestingRequestFilter = requestFilter + args.Knobs.Store = storeKnobs + })) } func TestChangefeedProtectedTimestampOnPause(t *testing.T) { @@ -4600,8 +4439,8 @@ func TestChangefeedProtectedTimestampOnPause(t *testing.T) { defer log.Scope(t).Close(t) testFn := func(shouldPause bool) cdcTestFn { - return func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + return func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING)`) sqlDB.Exec(t, `INSERT INTO foo VALUES (1, 'a'), (2, 'b'), (4, 'c'), (7, 'd'), (8, 'e')`) @@ -4627,7 +4466,7 @@ func TestChangefeedProtectedTimestampOnPause(t *testing.T) { // Pause the job then ensure that it has a reasonable protected timestamp. 
ctx := context.Background() - serverCfg := f.Server().DistSQLServer().(*distsql.ServerImpl).ServerConfig + serverCfg := s.Server.DistSQLServer().(*distsql.ServerImpl).ServerConfig jr := serverCfg.JobRegistry pts := serverCfg.ProtectedTimestampProvider @@ -4673,11 +4512,7 @@ func TestChangefeedProtectedTimestampOnPause(t *testing.T) { } testutils.RunTrueAndFalse(t, "protect_on_pause", func(t *testing.T, shouldPause bool) { - t.Run(`enterprise`, enterpriseTest(testFn(shouldPause), feedTestNoTenants)) - t.Run(`cloudstorage`, cloudStorageTest(testFn(shouldPause), feedTestNoTenants)) - t.Run(`kafka`, kafkaTest(testFn(shouldPause), feedTestNoTenants)) - t.Run(`webhook`, webhookTest(testFn(shouldPause), feedTestNoTenants)) - t.Run(`pubsub`, pubsubTest(testFn(shouldPause), feedTestNoTenants)) + cdcTest(t, testFn(shouldPause), feedTestEnterpriseSinks) }) } @@ -4686,8 +4521,8 @@ func TestManyChangefeedsOneTable(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING)`) sqlDB.Exec(t, `INSERT INTO foo VALUES (0, 'init')`) @@ -4731,20 +4566,15 @@ func TestManyChangefeedsOneTable(t *testing.T) { }) } - t.Run(`sinkless`, sinklessTest(testFn)) - t.Run(`enterprise`, enterpriseTest(testFn)) - t.Run(`cloudstorage`, cloudStorageTest(testFn)) - t.Run(`kafka`, kafkaTest(testFn)) - t.Run(`webhook`, webhookTest(testFn)) - t.Run(`pubsub`, pubsubTest(testFn)) + cdcTest(t, testFn) } func TestUnspecifiedPrimaryKey(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT)`) var id0 int sqlDB.QueryRow(t, `INSERT INTO foo VALUES (0) RETURNING rowid`).Scan(&id0) @@ -4761,10 +4591,7 @@ func TestUnspecifiedPrimaryKey(t *testing.T) { }) } - t.Run(`sinkless`, sinklessTest(testFn)) - t.Run(`enterprise`, enterpriseTest(testFn)) - t.Run(`kafka`, kafkaTest(testFn)) - t.Run(`pubsub`, pubsubTest(testFn)) + cdcTest(t, testFn) } // TestChangefeedNodeShutdown ensures that an enterprise changefeed continues @@ -4832,8 +4659,8 @@ func TestChangefeedTelemetry(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY)`) sqlDB.Exec(t, `INSERT INTO foo VALUES (1)`) sqlDB.Exec(t, `CREATE TABLE bar (a INT PRIMARY KEY)`) @@ -4869,8 +4696,8 @@ func TestChangefeedTelemetry(t *testing.T) { require.Equal(t, int32(1), counts[`changefeed.create.num_tables.2`]) } - t.Run(`sinkless`, sinklessTest(testFn)) - t.Run(`enterprise`, enterpriseTest(testFn)) + cdcTest(t, testFn, feedTestForceSink("sinkless")) + cdcTest(t, testFn, feedTestForceSink("enterprise")) } // Regression test for #41694. @@ -4881,8 +4708,8 @@ func TestChangefeedRestartDuringBackfill(t *testing.T) { // TODO(yevgeniy): Rework this test. It's too brittle. 
- testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - knobs := f.Server().TestingKnobs(). + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + knobs := s.TestingKnobs. DistSQL.(*execinfra.TestingKnobs). Changefeed.(*TestingKnobs) beforeEmitRowCh := make(chan error, 20) @@ -4901,7 +4728,7 @@ func TestChangefeedRestartDuringBackfill(t *testing.T) { } } - sqlDB := sqlutils.MakeSQLRunner(db) + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY)`) sqlDB.Exec(t, `INSERT INTO foo VALUES (0), (1), (2), (3)`) @@ -4990,7 +4817,8 @@ func TestChangefeedRestartDuringBackfill(t *testing.T) { } knobs.Store.(*kvserver.StoreTestingKnobs).UseSystemConfigSpanForQueues = true }) - t.Run(`kafka`, kafkaTest(testFn, useSysCfgInKV)) + + cdcTest(t, testFn, feedTestForceSink("kafka"), useSysCfgInKV) } func TestChangefeedHandlesDrainingNodes(t *testing.T) { @@ -5049,7 +4877,8 @@ func TestChangefeedHandlesDrainingNodes(t *testing.T) { // Create a factory which executes the CREATE CHANGEFEED statement on server 0. // This statement should fail, but the job itself ought to be created. // After some time, that job should be adopted by another node, and executed successfully. - f := makeCloudFeedFactory(tc.Server(1), tc.ServerConn(0), sinkDir) + f, closeSink := makeFeedFactory(t, randomSinkType(feedTestEnterpriseSinks), tc.Server(1), tc.ServerConn(0)) + defer closeSink() feed := feed(t, f, "CREATE CHANGEFEED FOR foo") defer closeFeed(t, feed) @@ -5082,8 +4911,8 @@ func TestChangefeedPrimaryKeyChangeWorks(t *testing.T) { skip.UnderRace(t) skip.UnderShort(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING NOT NULL)`) sqlDB.Exec(t, `INSERT INTO foo VALUES (0, 'initial')`) sqlDB.Exec(t, `UPSERT INTO foo VALUES (0, 'updated')`) @@ -5164,12 +4993,7 @@ INSERT INTO foo VALUES (1, 'f'); }) } - t.Run(`sinkless`, sinklessTest(testFn)) - t.Run(`enterprise`, enterpriseTest(testFn)) - t.Run(`cloudstorage`, cloudStorageTest(testFn)) - t.Run(`kafka`, kafkaTest(testFn)) - t.Run(`webhook`, webhookTest(testFn)) - t.Run(`pubsub`, pubsubTest(testFn)) + cdcTest(t, testFn) } // Primary key changes are supported by changefeeds starting in 21.1. 
This test @@ -5187,8 +5011,8 @@ func TestChangefeedPrimaryKeyChangeWorksWithMultipleTables(t *testing.T) { skip.UnderRace(t) skip.UnderShort(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING NOT NULL)`) sqlDB.Exec(t, `INSERT INTO foo VALUES (0, 'initial')`) sqlDB.Exec(t, `UPSERT INTO foo VALUES (0, 'updated')`) @@ -5266,12 +5090,7 @@ INSERT INTO bar VALUES (6, 'f'); }) } - t.Run(`sinkless`, sinklessTest(testFn)) - t.Run(`enterprise`, enterpriseTest(testFn)) - t.Run(`cloudstorage`, cloudStorageTest(testFn)) - t.Run(`kafka`, kafkaTest(testFn)) - t.Run(`webhook`, webhookTest(testFn)) - t.Run(`pubsub`, pubsubTest(testFn)) + cdcTest(t, testFn) } // TestChangefeedCheckpointSchemaChange tests to make sure that writes that @@ -5289,8 +5108,8 @@ func TestChangefeedCheckpointSchemaChange(t *testing.T) { skip.UnderRace(t) skip.UnderShort(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING NOT NULL)`) sqlDB.Exec(t, `CREATE TABLE bar (a INT PRIMARY KEY, b STRING NOT NULL)`) sqlDB.Exec(t, `INSERT INTO foo VALUES (0, 'initial')`) @@ -5318,7 +5137,7 @@ func TestChangefeedCheckpointSchemaChange(t *testing.T) { `bar: [0]->{"after": {"a": 0, "b": "initial"}}`, }) - require.NoError(t, crdb.ExecuteTx(context.Background(), db, nil, func(tx *gosql.Tx) error { + require.NoError(t, crdb.ExecuteTx(context.Background(), s.DB, nil, func(tx *gosql.Tx) error { for _, stmt := range []string{ `CREATE TABLE baz ()`, `INSERT INTO foo VALUES (2, 'initial')`, @@ -5416,14 +5235,9 @@ func TestChangefeedCheckpointSchemaChange(t *testing.T) { require.NotNil(t, next.Resolved) } }) - } - t.Run("enterprise", enterpriseTest(testFn)) - t.Run("cloudstorage", cloudStorageTest(testFn)) - t.Run("kafka", kafkaTest(testFn)) - t.Run(`webhook`, webhookTest(testFn)) - t.Run(`pubsub`, pubsubTest(testFn)) + cdcTest(t, testFn, feedTestEnterpriseSinks) } func TestChangefeedBackfillCheckpoint(t *testing.T) { @@ -5449,17 +5263,17 @@ func TestChangefeedBackfillCheckpoint(t *testing.T) { return err } - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServerWithSystem, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) valRange := []int{1, 1000} sqlDB.Exec(t, `CREATE TABLE foo(a INT PRIMARY KEY)`) sqlDB.Exec(t, fmt.Sprintf(`INSERT INTO foo (a) SELECT * FROM generate_series(%d, %d)`, valRange[0], valRange[1])) fooDesc := desctestutils.TestingGetPublicTableDescriptor( - f.Server().DB(), keys.SystemSQLCodec, "d", "foo") - tableSpan := fooDesc.PrimaryIndexSpan(keys.SystemSQLCodec) + s.SystemServer.DB(), s.Codec, "d", "foo") + tableSpan := fooDesc.PrimaryIndexSpan(s.Codec) - knobs := f.Server().(*server.TestServer).Cfg.TestingKnobs. + knobs := s.TestingKnobs. DistSQL.(*execinfra.TestingKnobs). Changefeed.(*TestingKnobs) @@ -5492,11 +5306,11 @@ func TestChangefeedBackfillCheckpoint(t *testing.T) { // Checkpoint progress frequently, and set the checkpoint size limit. 
changefeedbase.FrontierCheckpointFrequency.Override( - context.Background(), &f.Server().ClusterSettings().SV, 1) + context.Background(), &s.Server.ClusterSettings().SV, 1) changefeedbase.FrontierCheckpointMaxBytes.Override( - context.Background(), &f.Server().ClusterSettings().SV, maxCheckpointSize) + context.Background(), &s.Server.ClusterSettings().SV, maxCheckpointSize) - registry := f.Server().JobRegistry().(*jobs.Registry) + registry := s.Server.JobRegistry().(*jobs.Registry) foo := feed(t, f, `CREATE CHANGEFEED FOR foo WITH resolved='100ms'`) // Some test feeds (kafka) are not buffered, so we have to consume messages. var shouldDrain int32 = 1 @@ -5607,9 +5421,7 @@ func TestChangefeedBackfillCheckpoint(t *testing.T) { // TODO(ssd): Tenant testing disabled because of use of DB() for _, sz := range []int64{100 << 20, 100} { maxCheckpointSize = sz - t.Run(fmt.Sprintf("enterprise-limit=%s", humanize.Bytes(uint64(sz))), enterpriseTest(testFn, feedTestNoTenants)) - t.Run(fmt.Sprintf("cloudstorage-limit=%s", humanize.Bytes(uint64(sz))), cloudStorageTest(testFn, feedTestNoTenants)) - t.Run(fmt.Sprintf("kafka-limit=%s", humanize.Bytes(uint64(sz))), kafkaTest(testFn, feedTestNoTenants)) + cdcTestNamedWithSystem(t, fmt.Sprintf("limit=%s", humanize.Bytes(uint64(sz))), testFn, feedTestEnterpriseSinks) } } @@ -5669,8 +5481,8 @@ func TestChangefeedOrderingWithErrors(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING)`) foo := feed(t, f, `CREATE CHANGEFEED FOR foo WITH updated`) @@ -5718,20 +5530,20 @@ func TestChangefeedOrderingWithErrors(t *testing.T) { // only used for webhook sink for now since it's the only testfeed where // we can control the ordering of errors - t.Run(`webhook`, webhookTest(testFn)) + cdcTest(t, testFn, feedTestForceSink("webhook")) } func TestChangefeedOnErrorOption(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) t.Run(`pause on error`, func(t *testing.T) { sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING)`) - knobs := f.Server().TestingKnobs(). + knobs := s.TestingKnobs. DistSQL.(*execinfra.TestingKnobs). Changefeed.(*TestingKnobs) knobs.BeforeEmitRow = func(_ context.Context) error { @@ -5748,7 +5560,7 @@ func TestChangefeedOnErrorOption(t *testing.T) { // Verify job progress contains paused on error status. jobID := foo.(cdctest.EnterpriseTestFeed).JobID() - registry := f.Server().JobRegistry().(*jobs.Registry) + registry := s.Server.JobRegistry().(*jobs.Registry) job, err := registry.LoadJob(context.Background(), jobID) require.NoError(t, err) require.Contains(t, job.Progress().RunningStatus, "job failed (should fail with custom error) but is being paused because of on_error=pause") @@ -5774,7 +5586,7 @@ func TestChangefeedOnErrorOption(t *testing.T) { t.Run(`fail on error`, func(t *testing.T) { sqlDB.Exec(t, `CREATE TABLE bar (a INT PRIMARY KEY, b STRING)`) - knobs := f.Server().TestingKnobs(). + knobs := s.TestingKnobs. DistSQL.(*execinfra.TestingKnobs). 
Changefeed.(*TestingKnobs) knobs.BeforeEmitRow = func(_ context.Context) error { @@ -5794,7 +5606,7 @@ func TestChangefeedOnErrorOption(t *testing.T) { t.Run(`default`, func(t *testing.T) { sqlDB.Exec(t, `CREATE TABLE quux (a INT PRIMARY KEY, b STRING)`) - knobs := f.Server().TestingKnobs(). + knobs := s.TestingKnobs. DistSQL.(*execinfra.TestingKnobs). Changefeed.(*TestingKnobs) knobs.BeforeEmitRow = func(_ context.Context) error { @@ -5813,25 +5625,17 @@ func TestChangefeedOnErrorOption(t *testing.T) { }) } - t.Run(`enterprise`, enterpriseTest(testFn)) - t.Run(`cloudstorage`, cloudStorageTest(testFn)) - t.Run(`kafka`, kafkaTest(testFn)) - t.Run(`webhook`, webhookTest(testFn)) - t.Run(`pubsub`, pubsubTest(testFn)) + cdcTest(t, testFn, feedTestEnterpriseSinks) } func TestDistSenderRangeFeedPopulatesVirtualTable(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - s, db, _ := serverutils.StartServer(t, base.TestServerArgs{ - Knobs: base.TestingKnobs{ - JobsTestingKnobs: jobs.NewTestingKnobsWithShortIntervals(), - }, - }) - defer s.Stopper().Stop(context.Background()) + s, cleanup := makeServer(t) + defer cleanup() - sqlDB := sqlutils.MakeSQLRunner(db) + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `SET CLUSTER SETTING kv.rangefeed.enabled='true';`) sqlDB.Exec(t, `CREATE TABLE tbl (a INT, b STRING);`) sqlDB.Exec(t, `INSERT INTO tbl VALUES (1, 'one'), (2, 'two'), (3, 'three');`) @@ -5839,9 +5643,11 @@ func TestDistSenderRangeFeedPopulatesVirtualTable(t *testing.T) { var tableID int sqlDB.QueryRow(t, "SELECT table_id FROM crdb_internal.tables WHERE name='tbl'").Scan(&tableID) + tableKey := s.Codec.TablePrefix(uint32(tableID)) + numRangesQuery := fmt.Sprintf( - "SELECT count(*) FROM crdb_internal.active_range_feeds WHERE range_start LIKE '/Table/%d/%%'", - tableID) + "SELECT count(*) FROM crdb_internal.active_range_feeds WHERE range_start LIKE '%s/%%'", + tableKey) sqlDB.CheckQueryResultsRetry(t, numRangesQuery, [][]string{{"1"}}) } @@ -5850,8 +5656,8 @@ func TestChangefeedCaseInsensitiveOpts(t *testing.T) { defer log.Scope(t).Close(t) // Sanity check for case insensitive options - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) // Set up a type and table. sqlDB.Exec(t, `CREATE TABLE insensitive (x INT PRIMARY KEY, y string)`) sqlDB.Exec(t, `INSERT INTO insensitive VALUES (0, 'hello')`) @@ -5881,15 +5687,17 @@ func TestChangefeedCaseInsensitiveOpts(t *testing.T) { assertPayloads(t, cf, []string{`insensitive: [0]->{"after": {"x": 0, "y": "hello"}}`}) }) } - t.Run(`sinkless`, sinklessTest(testFn)) + + // Some sinks are incompatible with envelope + cdcTest(t, testFn, feedTestRestrictSinks("sinkless", "enterprise", "kafka")) } func TestChangefeedEndTime(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - knobs := f.Server().TestingKnobs(). + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + knobs := s.TestingKnobs. DistSQL.(*execinfra.TestingKnobs). 
Changefeed.(*TestingKnobs) endTimeReached := make(chan struct{}) @@ -5902,12 +5710,12 @@ func TestChangefeedEndTime(t *testing.T) { } } - sqlDB := sqlutils.MakeSQLRunner(db) + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, "CREATE TABLE foo (a INT PRIMARY KEY)") sqlDB.Exec(t, "INSERT INTO foo VALUES (1), (2), (3)") - fakeEndTime := f.Server().Clock().Now().Add(int64(time.Hour), 0).AsOfSystemTime() + fakeEndTime := s.Server.Clock().Now().Add(int64(time.Hour), 0).AsOfSystemTime() feed := feed(t, f, "CREATE CHANGEFEED FOR foo WITH end_time = $1", fakeEndTime) defer closeFeed(t, feed) @@ -5924,17 +5732,16 @@ func TestChangefeedEndTime(t *testing.T) { return s == jobs.StatusSucceeded })) } - t.Run(`enterprise`, enterpriseTest(testFn)) - t.Run(`cloudstorage`, cloudStorageTest(testFn)) - t.Run(`kafka`, kafkaTest(testFn)) + + cdcTest(t, testFn, feedTestEnterpriseSinks) } func TestChangefeedEndTimeWithCursor(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - knobs := f.Server().TestingKnobs(). + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + knobs := s.TestingKnobs. DistSQL.(*execinfra.TestingKnobs). Changefeed.(*TestingKnobs) endTimeReached := make(chan struct{}) @@ -5947,7 +5754,7 @@ func TestChangefeedEndTimeWithCursor(t *testing.T) { } } - sqlDB := sqlutils.MakeSQLRunner(db) + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, "CREATE TABLE foo (a INT PRIMARY KEY)") sqlDB.Exec(t, "INSERT INTO foo VALUES (1), (2), (3)") @@ -5956,7 +5763,7 @@ func TestChangefeedEndTimeWithCursor(t *testing.T) { sqlDB.QueryRow(t, "SELECT (cluster_logical_timestamp())").Scan(&tsCursor) sqlDB.Exec(t, "INSERT INTO foo VALUES (4), (5), (6)") - fakeEndTime := f.Server().Clock().Now().Add(int64(time.Hour), 0).AsOfSystemTime() + fakeEndTime := s.Server.Clock().Now().Add(int64(time.Hour), 0).AsOfSystemTime() feed := feed(t, f, "CREATE CHANGEFEED FOR foo WITH cursor = $1, end_time = $2, no_initial_scan", tsCursor, fakeEndTime) defer closeFeed(t, feed) @@ -5972,9 +5779,10 @@ func TestChangefeedEndTimeWithCursor(t *testing.T) { return s == jobs.StatusSucceeded })) } - t.Run(`enterprise`, enterpriseTest(testFn)) - t.Run(`cloudstorage`, cloudStorageTest(testFn)) - t.Run(`kafka`, kafkaTest(testFn)) + + // TODO: Fix sinkless feeds not providing pre-close events if Next is called + // after the feed was closed + cdcTest(t, testFn, feedTestEnterpriseSinks) } func TestChangefeedOnlyInitialScan(t *testing.T) { @@ -5986,8 +5794,8 @@ func TestChangefeedOnlyInitialScan(t *testing.T) { `initial backfill only`: `CREATE CHANGEFEED FOR foo WITH initial_scan = 'only'`, } - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) for testName, changefeedStmt := range initialScanOnlyTests { t.Run(testName, func(t *testing.T) { @@ -6028,9 +5836,8 @@ func TestChangefeedOnlyInitialScan(t *testing.T) { }) } } - t.Run(`enterprise`, enterpriseTest(testFn)) - t.Run(`cloudstorage`, cloudStorageTest(testFn)) - t.Run(`kafka`, kafkaTest(testFn)) + + cdcTest(t, testFn, feedTestEnterpriseSinks) } func TestChangefeedOnlyInitialScanCSV(t *testing.T) { @@ -6070,8 +5877,8 @@ func TestChangefeedOnlyInitialScanCSV(t *testing.T) { }, } - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t 
*testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) for testName, testData := range tests { t.Run(testName, func(t *testing.T) { @@ -6117,11 +5924,8 @@ func TestChangefeedOnlyInitialScanCSV(t *testing.T) { }) } } - t.Run(`enterprise`, enterpriseTest(testFn)) - t.Run(`cloudstorage`, cloudStorageTest(testFn)) - t.Run(`kafka`, kafkaTest(testFn)) - t.Run(`webhook`, webhookTest(testFn)) - t.Run(`pubsub`, pubsubTest(testFn)) + + cdcTest(t, testFn, feedTestEnterpriseSinks) } func TestChangefeedOnlyInitialScanCSVSinkless(t *testing.T) { @@ -6133,8 +5937,8 @@ func TestChangefeedOnlyInitialScanCSVSinkless(t *testing.T) { `initial backfill only with csv`: `CREATE CHANGEFEED FOR foo WITH initial_scan = 'only', format = csv`, } - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) for testName, changefeedStmt := range initialScanOnlyCSVTests { t.Run(testName, func(t *testing.T) { @@ -6173,15 +5977,16 @@ func TestChangefeedOnlyInitialScanCSVSinkless(t *testing.T) { }) } } - t.Run(`sinkless`, sinklessTest(testFn)) + + cdcTest(t, testFn, feedTestForceSink("sinkless")) } func TestChangefeedPrimaryKeyFilter(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, "CREATE TABLE foo (a INT PRIMARY KEY, b string)") sqlDB.Exec(t, "CREATE TABLE bar (a INT PRIMARY KEY, b string)") sqlDB.Exec(t, "INSERT INTO foo SELECT * FROM generate_series(1, 20)") @@ -6220,9 +6025,7 @@ func TestChangefeedPrimaryKeyFilter(t *testing.T) { }) } - t.Run(`enterprise`, enterpriseTest(testFn)) - t.Run(`cloudstorage`, cloudStorageTest(testFn)) - t.Run(`kafka`, kafkaTest(testFn)) + cdcTest(t, testFn) } func startMonitorWithBudget(budget int64) *mon.BytesMonitor { @@ -6314,11 +6117,11 @@ func TestChangefeedFlushesSinkToReleaseMemory(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - s, db, stopServer := startTestServer(t, newTestOptions()) + s, stopServer := makeServer(t) defer stopServer() - sqlDB := sqlutils.MakeSQLRunner(db) - knobs := s.TestingKnobs(). + sqlDB := sqlutils.MakeSQLRunner(s.DB) + knobs := s.TestingKnobs. DistSQL.(*execinfra.TestingKnobs). Changefeed.(*TestingKnobs) @@ -6387,33 +6190,22 @@ func TestChangefeedMultiPodTenantPlanning(t *testing.T) { TestingKnobs: tenantKnobs, Existing: false, } - server1, db1 := serverutils.StartTenant(t, tc.Server(0), tenant1Args) - tenantRunner := sqlutils.MakeSQLRunner(db1) + tenant1Server, tenant1DB := serverutils.StartTenant(t, tc.Server(0), tenant1Args) + tenantRunner := sqlutils.MakeSQLRunner(tenant1DB) tenantRunner.ExecMultiple(t, strings.Split(serverSetupStatements, ";")...) 
- testTenant := &testServerShim{server1, tc.Server(0)} - sql1 := sqlutils.MakeSQLRunner(db1) - defer db1.Close() + sql1 := sqlutils.MakeSQLRunner(tenant1DB) + defer tenant1DB.Close() tenant2Args := tenant1Args tenant2Args.Existing = true _, db2 := serverutils.StartTenant(t, tc.Server(1), tenant2Args) defer db2.Close() - // Ensure both nodes are live and able to be distributed to - testutils.SucceedsSoon(t, func() error { - status := server1.StatusServer().(serverpb.SQLStatusServer) - var nodes *serverpb.NodesListResponse - var err error - for nodes == nil || len(nodes.Nodes) != 2 { - nodes, err = status.NodesList(context.Background(), nil) - if err != nil { - return err - } - } - return nil - }) + // Ensure both pods can be assigned work + waitForTenantPodsActive(t, tenant1Server, 2) - feedFactory := makeKafkaFeedFactory(testTenant, db1) + feedFactory, cleanupSink := makeFeedFactory(t, randomSinkType(feedTestEnterpriseSinks), tenant1Server, tenant1DB) + defer cleanupSink() // Run a changefeed across two tables to guarantee multiple spans that can be spread across the aggregators sql1.Exec(t, "CREATE TABLE foo (a INT PRIMARY KEY)") @@ -6438,19 +6230,19 @@ func TestChangefeedCreateTelemetryLogs(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - s, db, stopServer := startTestServer(t, newTestOptions()) + s, stopServer := makeServer(t) defer stopServer() - sqlDB := sqlutils.MakeSQLRunner(db) + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING)`) sqlDB.Exec(t, `INSERT INTO foo VALUES (0, 'initial')`) sqlDB.Exec(t, `CREATE TABLE bar (a INT PRIMARY KEY, b STRING)`) sqlDB.Exec(t, `INSERT INTO bar VALUES (0, 'initial')`) t.Run(`core_sink_type`, func(t *testing.T) { - coreSink, cleanup := sqlutils.PGUrl(t, s.ServingSQLAddr(), t.Name(), url.User(username.RootUser)) + coreSink, cleanup := sqlutils.PGUrl(t, s.Server.SQLAddr(), t.Name(), url.User(username.RootUser)) defer cleanup() - coreFeedFactory := makeSinklessFeedFactory(s, coreSink) + coreFeedFactory := makeSinklessFeedFactory(s.Server, coreSink) beforeCreateSinkless := timeutil.Now() coreFeed := feed(t, coreFeedFactory, `CREATE CHANGEFEED FOR foo`) @@ -6462,7 +6254,7 @@ func TestChangefeedCreateTelemetryLogs(t *testing.T) { }) t.Run(`gcpubsub_sink_type with options`, func(t *testing.T) { - pubsubFeedFactory := makePubsubFeedFactory(s, db) + pubsubFeedFactory := makePubsubFeedFactory(s.Server, s.DB) beforeCreatePubsub := timeutil.Now() pubsubFeed := feed(t, pubsubFeedFactory, `CREATE CHANGEFEED FOR foo, bar WITH resolved, no_initial_scan`) defer closeFeed(t, pubsubFeed) @@ -6494,23 +6286,22 @@ func TestChangefeedFailedTelemetryLogs(t *testing.T) { } t.Run(`connection_closed`, func(t *testing.T) { - s, db, stopServer := startTestServer(t, newTestOptions()) + s, stopServer := makeServer(t) defer stopServer() - sqlDB := sqlutils.MakeSQLRunner(db) + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING)`) sqlDB.Exec(t, `INSERT INTO foo VALUES (0, 'initial')`) sqlDB.Exec(t, `UPSERT INTO foo VALUES (0, 'updated')`) - coreSink, coreSinkCleanup := sqlutils.PGUrl(t, s.ServingSQLAddr(), t.Name(), url.User(username.RootUser)) - coreFactory := makeSinklessFeedFactory(s, coreSink) + coreFactory, sinkCleanup := makeFeedFactory(t, "sinkless", s.Server, s.DB) coreFeed := feed(t, coreFactory, `CREATE CHANGEFEED FOR foo`) assertPayloads(t, coreFeed, []string{ `foo: [0]->{"after": {"a": 0, "b": "updated"}}`, }) beforeCoreSinkClose := timeutil.Now() 
- coreSinkCleanup() + sinkCleanup() closeFeed(t, coreFeed) failLogs := waitForLogs(t, beforeCoreSinkClose) @@ -6518,8 +6309,8 @@ func TestChangefeedFailedTelemetryLogs(t *testing.T) { require.Equal(t, failLogs[0].FailureType, changefeedbase.ConnectionClosed) }) - t.Run(`user_input`, enterpriseTest(func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + cdcTestNamed(t, "user_input", func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING)`) beforeCreate := timeutil.Now() @@ -6529,13 +6320,13 @@ func TestChangefeedFailedTelemetryLogs(t *testing.T) { failLogs := waitForLogs(t, beforeCreate) require.Equal(t, 1, len(failLogs)) require.Equal(t, failLogs[0].FailureType, changefeedbase.UserInput) - })) + }, feedTestEnterpriseSinks) - t.Run(`unknown_error`, pubsubTest(func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + cdcTestNamed(t, "unknown_error", func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING)`) - knobs := f.Server().TestingKnobs(). + knobs := s.TestingKnobs. DistSQL.(*execinfra.TestingKnobs). Changefeed.(*TestingKnobs) knobs.BeforeEmitRow = func(_ context.Context) error { @@ -6554,5 +6345,5 @@ func TestChangefeedFailedTelemetryLogs(t *testing.T) { require.Equal(t, failLogs[0].FailureType, changefeedbase.UnknownError) require.Equal(t, failLogs[0].SinkType, `gcpubsub`) require.Equal(t, failLogs[0].NumTables, int32(1)) - })) + }, feedTestForceSink("pubsub")) } diff --git a/pkg/ccl/changefeedccl/encoder_test.go b/pkg/ccl/changefeedccl/encoder_test.go index bac4d2fdc329..29e5eac175c2 100644 --- a/pkg/ccl/changefeedccl/encoder_test.go +++ b/pkg/ccl/changefeedccl/encoder_test.go @@ -256,10 +256,10 @@ func TestAvroEncoder(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { ctx := context.Background() - sqlDB := sqlutils.MakeSQLRunner(db) + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING)`) var ts1 string sqlDB.QueryRow(t, @@ -288,7 +288,7 @@ func TestAvroEncoder(t *testing.T) { require.NoError(t, err) var ts2 string - require.NoError(t, crdb.ExecuteTx(ctx, db, nil /* txopts */, func(tx *gosql.Tx) error { + require.NoError(t, crdb.ExecuteTx(ctx, s.DB, nil /* txopts */, func(tx *gosql.Tx) error { return tx.QueryRow( `INSERT INTO foo VALUES (3, 'baz') RETURNING cluster_logical_timestamp()`, ).Scan(&ts2) @@ -300,7 +300,7 @@ func TestAvroEncoder(t *testing.T) { }) } - t.Run(`kafka`, kafkaTest(testFn)) + cdcTest(t, testFn, feedTestForceSink("kafka")) } func TestAvroEncoderWithTLS(t *testing.T) { @@ -421,8 +421,8 @@ func TestAvroArray(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b INT[])`) sqlDB.Exec(t, `INSERT INTO foo VALUES @@ -458,15 +458,15 @@ func TestAvroArray(t *testing.T) { } - t.Run(`kafka`, kafkaTest(testFn)) + cdcTest(t, testFn, feedTestForceSink("kafka")) } func 
TestAvroArrayCap(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b INT[])`) sqlDB.Exec(t, `INSERT INTO foo VALUES (0, ARRAY[])`) @@ -497,15 +497,15 @@ func TestAvroArrayCap(t *testing.T) { } - t.Run(`kafka`, kafkaTest(testFn)) + cdcTest(t, testFn, feedTestForceSink("kafka")) } func TestAvroCollatedString(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b string collate "fr-CA")`) sqlDB.Exec(t, `INSERT INTO foo VALUES (1, 'désolée' collate "fr-CA")`) @@ -518,15 +518,15 @@ func TestAvroCollatedString(t *testing.T) { }) } - t.Run(`kafka`, kafkaTest(testFn)) + cdcTest(t, testFn, feedTestForceSink("kafka")) } func TestAvroEnum(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TYPE status AS ENUM ('open', 'closed', 'inactive')`) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b status, c int default 0)`) sqlDB.Exec(t, `INSERT INTO foo VALUES (1, 'open')`) @@ -577,15 +577,15 @@ func TestAvroEnum(t *testing.T) { } - t.Run(`kafka`, kafkaTest(testFn)) + cdcTest(t, testFn, feedTestForceSink("kafka")) } func TestAvroSchemaNaming(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE DATABASE movr`) sqlDB.Exec(t, `CREATE TABLE movr.drivers (id INT PRIMARY KEY, name STRING)`) sqlDB.Exec(t, @@ -678,15 +678,15 @@ func TestAvroSchemaNaming(t *testing.T) { } - t.Run(`kafka`, kafkaTest(testFn)) + cdcTest(t, testFn, feedTestForceSink("kafka")) } func TestAvroSchemaNamespace(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE DATABASE movr`) sqlDB.Exec(t, `CREATE TABLE movr.drivers (id INT PRIMARY KEY, name STRING)`) sqlDB.Exec(t, @@ -720,15 +720,15 @@ func TestAvroSchemaNamespace(t *testing.T) { require.Contains(t, foo.registry.SchemaForSubject(`superdrivers-value`), `"namespace":"super"`) } - t.Run(`kafka`, kafkaTest(testFn)) + cdcTest(t, testFn, feedTestForceSink("kafka")) } func TestTableNameCollision(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, 
`CREATE DATABASE movr`) sqlDB.Exec(t, `CREATE DATABASE printr`) sqlDB.Exec(t, `CREATE TABLE movr.drivers (id INT PRIMARY KEY, name STRING)`) @@ -770,15 +770,15 @@ func TestTableNameCollision(t *testing.T) { }) } - t.Run(`kafka`, kafkaTest(testFn)) + cdcTest(t, testFn, feedTestForceSink("kafka")) } func TestAvroMigrateToUnsupportedColumn(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY)`) sqlDB.Exec(t, `INSERT INTO foo VALUES (1)`) @@ -796,18 +796,18 @@ func TestAvroMigrateToUnsupportedColumn(t *testing.T) { } } - t.Run(`kafka`, kafkaTest(testFn)) + cdcTest(t, testFn, feedTestForceSink("kafka")) } func TestAvroLedger(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { ctx := context.Background() gen := ledger.FromFlags(`--customers=1`) var l workloadsql.InsertsDataLoader - _, err := workloadsql.Setup(ctx, db, gen, l) + _, err := workloadsql.Setup(ctx, s.DB, gen, l) require.NoError(t, err) ledger := feed(t, f, fmt.Sprintf(`CREATE CHANGEFEED FOR customer, transaction, entry, session @@ -830,5 +830,5 @@ func TestAvroLedger(t *testing.T) { }) } - t.Run(`kafka`, kafkaTest(testFn)) + cdcTest(t, testFn, feedTestForceSink("kafka")) } diff --git a/pkg/ccl/changefeedccl/helpers_tenant_shim_test.go b/pkg/ccl/changefeedccl/helpers_tenant_shim_test.go deleted file mode 100644 index 7dd4b3e0cb24..000000000000 --- a/pkg/ccl/changefeedccl/helpers_tenant_shim_test.go +++ /dev/null @@ -1,121 +0,0 @@ -// Copyright 2021 The Cockroach Authors. -// -// Licensed as a CockroachDB Enterprise file under the Cockroach Community -// License (the "License"); you may not use this file except in compliance with -// the License. You may obtain a copy of the License at -// -// https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt - -package changefeedccl - -import ( - "context" - - "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/config" - "github.com/cockroachdb/cockroach/pkg/kv" - "github.com/cockroachdb/cockroach/pkg/kv/kvserver/liveness/livenesspb" - "github.com/cockroachdb/cockroach/pkg/roachpb" - "github.com/cockroachdb/cockroach/pkg/rpc" - "github.com/cockroachdb/cockroach/pkg/server/status" - "github.com/cockroachdb/cockroach/pkg/settings/cluster" - "github.com/cockroachdb/cockroach/pkg/storage" - "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" - "github.com/cockroachdb/cockroach/pkg/util/hlc" - "github.com/cockroachdb/cockroach/pkg/util/stop" - "github.com/cockroachdb/cockroach/pkg/util/uuid" -) - -// testServerShim is a kludge to get a few more tests working in -// tenant-mode. -// -// Currently, our TestFeedFactory has a Server() method that returns a -// TestServerInterface. The TestTenantInterface returned by -// StartTenant isn't a TestServerInterface. -// -// TODO(ssd): Clean this up. Perhaps we can add a SQLServer() method -// to TestFeedFactory that returns just the bits that are shared. 
-type testServerShim struct { - serverutils.TestTenantInterface - kvServer serverutils.TestServerInterface -} - -const unsupportedShimMethod = ` -This TestServerInterface method is not supported for tenants. Either disable this test on tenants by using the -feedOptionNoTenants option or add an appropriate implementation for this method to testServerShim. -` - -var _ serverutils.TestServerInterface = (*testServerShim)(nil) - -func (t *testServerShim) ServingSQLAddr() string { - return t.SQLAddr() -} - -func (t *testServerShim) Stopper() *stop.Stopper { panic(unsupportedShimMethod) } -func (t *testServerShim) Start(context.Context) error { panic(unsupportedShimMethod) } -func (t *testServerShim) Node() interface{} { panic(unsupportedShimMethod) } -func (t *testServerShim) NodeID() roachpb.NodeID { panic(unsupportedShimMethod) } -func (t *testServerShim) StorageClusterID() uuid.UUID { panic(unsupportedShimMethod) } -func (t *testServerShim) ServingRPCAddr() string { panic(unsupportedShimMethod) } -func (t *testServerShim) RPCAddr() string { panic(unsupportedShimMethod) } -func (t *testServerShim) DB() *kv.DB { panic(unsupportedShimMethod) } -func (t *testServerShim) RPCContext() *rpc.Context { panic(unsupportedShimMethod) } -func (t *testServerShim) LeaseManager() interface{} { panic(unsupportedShimMethod) } -func (t *testServerShim) InternalExecutor() interface{} { panic(unsupportedShimMethod) } -func (t *testServerShim) ExecutorConfig() interface{} { panic(unsupportedShimMethod) } -func (t *testServerShim) TracerI() interface{} { panic(unsupportedShimMethod) } -func (t *testServerShim) GossipI() interface{} { panic(unsupportedShimMethod) } -func (t *testServerShim) RangeFeedFactory() interface{} { panic(unsupportedShimMethod) } -func (t *testServerShim) DistSenderI() interface{} { panic(unsupportedShimMethod) } -func (t *testServerShim) MigrationServer() interface{} { panic(unsupportedShimMethod) } -func (t *testServerShim) SQLServer() interface{} { panic(unsupportedShimMethod) } -func (t *testServerShim) SQLLivenessProvider() interface{} { panic(unsupportedShimMethod) } -func (t *testServerShim) StartupMigrationsManager() interface{} { panic(unsupportedShimMethod) } -func (t *testServerShim) NodeLiveness() interface{} { panic(unsupportedShimMethod) } -func (t *testServerShim) HeartbeatNodeLiveness() error { panic(unsupportedShimMethod) } -func (t *testServerShim) NodeDialer() interface{} { panic(unsupportedShimMethod) } -func (t *testServerShim) SetDistSQLSpanResolver(spanResolver interface{}) { - panic(unsupportedShimMethod) -} -func (t *testServerShim) MustGetSQLCounter(name string) int64 { panic(unsupportedShimMethod) } -func (t *testServerShim) MustGetSQLNetworkCounter(name string) int64 { panic(unsupportedShimMethod) } -func (t *testServerShim) WriteSummaries() error { panic(unsupportedShimMethod) } -func (t *testServerShim) GetFirstStoreID() roachpb.StoreID { panic(unsupportedShimMethod) } -func (t *testServerShim) GetStores() interface{} { panic(unsupportedShimMethod) } -func (t *testServerShim) ClusterSettings() *cluster.Settings { panic(unsupportedShimMethod) } -func (t *testServerShim) Decommission( - ctx context.Context, targetStatus livenesspb.MembershipStatus, nodeIDs []roachpb.NodeID, -) error { - panic(unsupportedShimMethod) -} -func (t *testServerShim) SplitRange( - splitKey roachpb.Key, -) (left roachpb.RangeDescriptor, right roachpb.RangeDescriptor, err error) { - panic(unsupportedShimMethod) -} -func (t *testServerShim) MergeRanges( - leftKey roachpb.Key, -) (merged 
roachpb.RangeDescriptor, err error) { - panic(unsupportedShimMethod) -} -func (t *testServerShim) ExpectedInitialRangeCount() (int, error) { panic(unsupportedShimMethod) } -func (t *testServerShim) ForceTableGC( - ctx context.Context, database, table string, timestamp hlc.Timestamp, -) error { - panic(unsupportedShimMethod) -} -func (t *testServerShim) UpdateChecker() interface{} { panic(unsupportedShimMethod) } -func (t *testServerShim) StartTenant( - ctx context.Context, params base.TestTenantArgs, -) (serverutils.TestTenantInterface, error) { - panic(unsupportedShimMethod) -} -func (t *testServerShim) ScratchRange() (roachpb.Key, error) { panic(unsupportedShimMethod) } -func (t *testServerShim) Engines() []storage.Engine { panic(unsupportedShimMethod) } -func (t *testServerShim) MetricsRecorder() *status.MetricsRecorder { panic(unsupportedShimMethod) } -func (t *testServerShim) CollectionFactory() interface{} { panic(unsupportedShimMethod) } -func (t *testServerShim) SystemTableIDResolver() interface{} { panic(unsupportedShimMethod) } -func (t *testServerShim) SpanConfigKVSubscriber() interface{} { panic(unsupportedShimMethod) } -func (t *testServerShim) SystemConfigProvider() config.SystemConfigProvider { - panic(unsupportedShimMethod) -} diff --git a/pkg/ccl/changefeedccl/helpers_test.go b/pkg/ccl/changefeedccl/helpers_test.go index 897e14bb8248..b50d79001329 100644 --- a/pkg/ccl/changefeedccl/helpers_test.go +++ b/pkg/ccl/changefeedccl/helpers_test.go @@ -36,8 +36,11 @@ import ( _ "github.com/cockroachdb/cockroach/pkg/ccl/partitionccl" "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" + "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/server" + "github.com/cockroachdb/cockroach/pkg/server/serverpb" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" @@ -325,15 +328,6 @@ SET CLUSTER SETTING sql.defaults.vectorize=on; CREATE DATABASE d; ` -func startTestServer( - t testing.TB, options feedTestOptions, -) (serverutils.TestServerInterface, *gosql.DB, func()) { - if options.useTenant { - return startTestTenant(t, options) - } - return startTestFullServer(t, options) -} - func startTestFullServer( t testing.TB, options feedTestOptions, ) (serverutils.TestServerInterface, *gosql.DB, func()) { @@ -418,10 +412,26 @@ func startTestCluster(t testing.TB) (serverutils.TestClusterInterface, *gosql.DB return cluster, db, cleanupAndReset } +func waitForTenantPodsActive( + t testing.TB, tenantServer serverutils.TestTenantInterface, numPods int, +) { + testutils.SucceedsWithin(t, func() error { + status := tenantServer.StatusServer().(serverpb.SQLStatusServer) + var nodes *serverpb.NodesListResponse + var err error + for nodes == nil || len(nodes.Nodes) != numPods { + nodes, err = status.NodesList(context.Background(), nil) + if err != nil { + return err + } + } + return nil + }, 10*time.Second) +} + func startTestTenant( - t testing.TB, options feedTestOptions, -) (serverutils.TestServerInterface, *gosql.DB, func()) { - kvServer, _, cleanupCluster := startTestFullServer(t, options) + t testing.TB, systemServer serverutils.TestServerInterface, options feedTestOptions, +) (roachpb.TenantID, serverutils.TestTenantInterface, *gosql.DB, func()) { knobs := base.TestingKnobs{ DistSQL: 
&execinfra.TestingKnobs{Changefeed: &TestingKnobs{}}, JobsTestingKnobs: jobs.NewTestingKnobsWithShortIntervals(), @@ -440,32 +450,30 @@ func startTestTenant( ExternalIODir: options.externalIODir, } - tenantServer, tenantDB := serverutils.StartTenant(t, kvServer, tenantArgs) + tenantServer, tenantDB := serverutils.StartTenant(t, systemServer, tenantArgs) // Re-run setup on the tenant as well tenantRunner := sqlutils.MakeSQLRunner(tenantDB) tenantRunner.ExecMultiple(t, strings.Split(serverSetupStatements, ";")...) - server := &testServerShim{tenantServer, kvServer} - // Log so that it is clear if a failed test happened - // to run on a tenant. - t.Logf("Running test using tenant %s", tenantID) - return server, tenantDB, func() { + waitForTenantPodsActive(t, tenantServer, 1) + + return tenantID, tenantServer, tenantDB, func() { tenantServer.Stopper().Stop(context.Background()) - log.Infof(context.Background(), "tenant server stopped") - cleanupCluster() - log.Infof(context.Background(), "cluster shut down") } } -type cdcTestFn func(*testing.T, *gosql.DB, cdctest.TestFeedFactory) +type cdcTestFn func(*testing.T, TestServer, cdctest.TestFeedFactory) +type cdcTestWithSystemFn func(*testing.T, TestServerWithSystem, cdctest.TestFeedFactory) type updateArgsFn func(args *base.TestServerArgs) type updateKnobsFn func(knobs *base.TestingKnobs) type feedTestOptions struct { - useTenant bool - argsFn updateArgsFn - knobsFn updateKnobsFn - externalIODir string + useTenant bool + argsFn updateArgsFn + knobsFn updateKnobsFn + externalIODir string + allowedSinkTypes []string + disabledSinkTypes []string } type feedTestOption func(opts *feedTestOptions) @@ -474,6 +482,22 @@ type feedTestOption func(opts *feedTestOptions) // from randomly running on a tenant. var feedTestNoTenants = func(opts *feedTestOptions) { opts.useTenant = false } +var feedTestForceSink = func(sinkType string) feedTestOption { + return feedTestRestrictSinks(sinkType) +} + +var feedTestRestrictSinks = func(sinkTypes ...string) feedTestOption { + return func(opts *feedTestOptions) { opts.allowedSinkTypes = append(opts.allowedSinkTypes, sinkTypes...) } +} + +var feedTestEnterpriseSinks = func(opts *feedTestOptions) { + feedTestOmitSinks("sinkless")(opts) +} + +var feedTestOmitSinks = func(sinkTypes ...string) feedTestOption { + return func(opts *feedTestOptions) { opts.disabledSinkTypes = append(opts.disabledSinkTypes, sinkTypes...) } +} + // withArgsFn is a feedTestOption that allows the caller to modify the // TestServerArgs before they are used to create the test server. Note // that in multi-tenant tests, these will only apply to the kvServer @@ -496,7 +520,7 @@ func newTestOptions() feedTestOptions { // percentTenant is the percentage of tests that will be run against // a SQL-node in a multi-tenant server. 1 for all tests to be run on a // tenant. 
- const percentTenant = 0.25 + const percentTenant = 1 return feedTestOptions{ useTenant: rand.Float32() < percentTenant, } @@ -510,128 +534,6 @@ func makeOptions(opts ...feedTestOption) feedTestOptions { return options } -func sinklessTest(testFn cdcTestFn, testOpts ...feedTestOption) func(*testing.T) { - return sinklessTestWithOptions(testFn, makeOptions(testOpts...)) -} - -func sinklessTestWithOptions(testFn cdcTestFn, opts feedTestOptions) func(*testing.T) { - return func(t *testing.T) { - s, db, stopServer := startTestServer(t, opts) - defer stopServer() - - sink, cleanup := sqlutils.PGUrl(t, s.ServingSQLAddr(), t.Name(), url.User(username.RootUser)) - defer cleanup() - f := makeSinklessFeedFactory(s, sink) - testFn(t, db, f) - } -} - -// RunRandomSink runs the testFn against one of a number of possible -// sinks. Sinkless is not included in the possible sinks. -func RunRandomSinkTest(t *testing.T, desc string, testFn cdcTestFn, testOpts ...feedTestOption) { - // TODO(ssd): It would be nice if explicitly selecting a test - // via -run/TESTS= would force it to always run. - switch p := rand.Float32(); { - case p < 0.20: - t.Run(fmt.Sprintf("enterprise/%s", desc), enterpriseTest(testFn, testOpts...)) - case p < 0.40: - t.Run(fmt.Sprintf("cloudstorage/%s", desc), cloudStorageTest(testFn, testOpts...)) - case p < 0.60: - t.Run(fmt.Sprintf("webhook/%s", desc), webhookTest(testFn, testOpts...)) - default: // Run kafka a bit more often - t.Run(fmt.Sprintf("kafka/%s", desc), kafkaTest(testFn, testOpts...)) - } -} - -func enterpriseTest(testFn cdcTestFn, testOpts ...feedTestOption) func(*testing.T) { - return enterpriseTestWithOptions(testFn, makeOptions(testOpts...)) -} - -func enterpriseTestWithOptions(testFn cdcTestFn, options feedTestOptions) func(*testing.T) { - return func(t *testing.T) { - s, db, stopServer := startTestServer(t, options) - defer stopServer() - - sink, cleanup := sqlutils.PGUrl(t, s.ServingSQLAddr(), t.Name(), url.User(username.RootUser)) - defer cleanup() - f := makeTableFeedFactory(s, db, sink) - - testFn(t, db, f) - } -} - -func cloudStorageTest(testFn cdcTestFn, testOpts ...feedTestOption) func(*testing.T) { - return cloudStorageTestWithOptions(testFn, makeOptions(testOpts...)) -} - -func cloudStorageTestWithOptions(testFn cdcTestFn, options feedTestOptions) func(*testing.T) { - return func(t *testing.T) { - if options.externalIODir == "" { - dir, dirCleanupFn := testutils.TempDir(t) - defer dirCleanupFn() - options.externalIODir = dir - } - oldKnobsFn := options.knobsFn - options.knobsFn = func(knobs *base.TestingKnobs) { - if oldKnobsFn != nil { - oldKnobsFn(knobs) - } - blobClientFactory := blobs.NewLocalOnlyBlobClientFactory(options.externalIODir) - if serverKnobs, ok := knobs.Server.(*server.TestingKnobs); ok { - serverKnobs.BlobClientFactory = blobClientFactory - } else { - knobs.Server = &server.TestingKnobs{ - BlobClientFactory: blobClientFactory, - } - } - } - s, db, stopServer := startTestServer(t, options) - defer stopServer() - - f := makeCloudFeedFactory(s, db, options.externalIODir) - testFn(t, db, f) - } -} - -func kafkaTest(testFn cdcTestFn, testOpts ...feedTestOption) func(t *testing.T) { - return kafkaTestWithOptions(testFn, makeOptions(testOpts...)) -} - -func kafkaTestWithOptions(testFn cdcTestFn, options feedTestOptions) func(*testing.T) { - return func(t *testing.T) { - s, db, stopServer := startTestServer(t, options) - defer stopServer() - f := makeKafkaFeedFactory(s, db) - testFn(t, db, f) - } -} - -func webhookTest(testFn cdcTestFn, testOpts 
...feedTestOption) func(t *testing.T) { - return webhookTestWithOptions(testFn, makeOptions(testOpts...)) -} - -func webhookTestWithOptions(testFn cdcTestFn, options feedTestOptions) func(*testing.T) { - return func(t *testing.T) { - s, db, stopServer := startTestServer(t, options) - defer stopServer() - f := makeWebhookFeedFactory(s, db) - testFn(t, db, f) - } -} - -func pubsubTest(testFn cdcTestFn, testOpts ...feedTestOption) func(t *testing.T) { - return pubsubTestWithOptions(testFn, makeOptions(testOpts...)) -} - -func pubsubTestWithOptions(testFn cdcTestFn, options feedTestOptions) func(*testing.T) { - return func(t *testing.T) { - s, db, stopServer := startTestServer(t, options) - defer stopServer() - f := makePubsubFeedFactory(s, db) - testFn(t, db, f) - } -} - func serverArgsRegion(args base.TestServerArgs) string { for _, tier := range args.Locality.Tiers { if tier.Key == "region" { @@ -644,8 +546,8 @@ func serverArgsRegion(args base.TestServerArgs) string { // expectNotice creates a pretty crude database connection that doesn't involve // a lot of cdc test framework, use with caution. Driver-agnostic tools don't // have clean ways of inspecting incoming notices. -func expectNotice(t *testing.T, s serverutils.TestServerInterface, sql string, expected string) { - url, cleanup := sqlutils.PGUrl(t, s.ServingSQLAddr(), t.Name(), url.User(username.RootUser)) +func expectNotice(t *testing.T, s serverutils.TestTenantInterface, sql string, expected string) { + url, cleanup := sqlutils.PGUrl(t, s.SQLAddr(), t.Name(), url.User(username.RootUser)) defer cleanup() base, err := pq.NewConnector(url.String()) if err != nil { @@ -683,6 +585,248 @@ func closeFeed(t testing.TB, f cdctest.TestFeed) { } } +// TestServer is a struct to allow tests to operate on a shared API regardless +// of a test running as the system tenant or a secondary tenant +type TestServer struct { + DB *gosql.DB + Server serverutils.TestTenantInterface + Codec keys.SQLCodec + TestingKnobs base.TestingKnobs +} + +// TestServerWithSystem provides access to the system db and server for a +// TestServer. This is useful for some tests that explicitly require access to +// the system tenant, for example if +// desctestutils.TestingGetPublicTableDescriptor is being called. +type TestServerWithSystem struct { + TestServer + SystemDB *gosql.DB + SystemServer serverutils.TestServerInterface +} + +func makeSystemServer( + t *testing.T, opts ...feedTestOption, +) (testServer TestServerWithSystem, cleanup func()) { + options := makeOptions(opts...) + return makeSystemServerWithOptions(t, options) +} + +var _ = makeSystemServer // silence unused warning + +func makeSystemServerWithOptions( + t *testing.T, options feedTestOptions, +) (testServer TestServerWithSystem, cleanup func()) { + systemServer, systemDB, clusterCleanup := startTestFullServer(t, options) + return TestServerWithSystem{ + TestServer: TestServer{ + DB: systemDB, + Server: systemServer, + TestingKnobs: systemServer.(*server.TestServer).Cfg.TestingKnobs, + Codec: keys.SystemSQLCodec, + }, + SystemServer: systemServer, + SystemDB: systemDB, + }, func() { + clusterCleanup() + } +} + +func makeTenantServer( + t *testing.T, opts ...feedTestOption, +) (testServer TestServerWithSystem, cleanup func()) { + options := makeOptions(opts...) 
+ return makeTenantServerWithOptions(t, options) +} +func makeTenantServerWithOptions( + t *testing.T, options feedTestOptions, +) (testServer TestServerWithSystem, cleanup func()) { + systemServer, systemDB, clusterCleanup := startTestFullServer(t, options) + tenantID, tenantServer, tenantDB, tenantCleanup := startTestTenant(t, systemServer, options) + + return TestServerWithSystem{ + TestServer: TestServer{ + DB: tenantDB, + Server: tenantServer, + TestingKnobs: tenantServer.(*server.TestTenant).Cfg.TestingKnobs, + Codec: keys.MakeSQLCodec(tenantID), + }, + SystemDB: systemDB, + SystemServer: systemServer, + }, func() { + tenantCleanup() + clusterCleanup() + } +} + +func makeServer( + t *testing.T, opts ...feedTestOption, +) (testServer TestServerWithSystem, cleanup func()) { + options := makeOptions(opts...) + return makeServerWithOptions(t, options) +} + +func makeServerWithOptions( + t *testing.T, options feedTestOptions, +) (server TestServerWithSystem, cleanup func()) { + if options.useTenant { + t.Logf("making server as secondary tenant") + return makeTenantServerWithOptions(t, options) + } + t.Logf("making server as system tenant") + return makeSystemServerWithOptions(t, options) +} + +func randomSinkType(opts ...feedTestOption) string { + options := makeOptions(opts...) + return randomSinkTypeWithOptions(options) +} + +func randomSinkTypeWithOptions(options feedTestOptions) string { + sinkWeights := map[string]int{ + "kafka": 2, // run kafka a bit more often + "enterprise": 1, + "webhook": 1, + "pubsub": 1, + "sinkless": 1, + "cloudstorage": 0, // requires externalIODir set + } + if options.externalIODir != "" { + sinkWeights["cloudstorage"] = 1 + } + if options.allowedSinkTypes != nil { + sinkWeights = map[string]int{} + for _, sinkType := range options.allowedSinkTypes { + sinkWeights[sinkType] = 1 + } + } + if options.disabledSinkTypes != nil { + for _, sinkType := range options.disabledSinkTypes { + sinkWeights[sinkType] = 0 + } + } + weightTotal := 0 + for _, weight := range sinkWeights { + weightTotal += weight + } + p := rand.Float32() * float32(weightTotal) + var sum float32 = 0 + for sink, weight := range sinkWeights { + sum += float32(weight) + if p <= sum { + return sink + } + } + return "kafka" // unreachable +} + +// addCloudStorageOptions adds the options necessary to enable a server to run a +// cloudstorage changefeed on it +func addCloudStorageOptions(t *testing.T, options *feedTestOptions) (cleanup func()) { + dir, dirCleanupFn := testutils.TempDir(t) + options.externalIODir = dir + oldKnobsFn := options.knobsFn + options.knobsFn = func(knobs *base.TestingKnobs) { + if oldKnobsFn != nil { + oldKnobsFn(knobs) + } + blobClientFactory := blobs.NewLocalOnlyBlobClientFactory(options.externalIODir) + if serverKnobs, ok := knobs.Server.(*server.TestingKnobs); ok { + serverKnobs.BlobClientFactory = blobClientFactory + } else { + knobs.Server = &server.TestingKnobs{ + BlobClientFactory: blobClientFactory, + } + } + } + return dirCleanupFn +} + +func makeFeedFactory( + t *testing.T, + sinkType string, + s serverutils.TestTenantInterface, + db *gosql.DB, + testOpts ...feedTestOption, +) (factory cdctest.TestFeedFactory, sinkCleanup func()) { + options := makeOptions(testOpts...) 
+	return makeFeedFactoryWithOptions(t, sinkType, s, db, options)
+}
+
+func makeFeedFactoryWithOptions(
+	t *testing.T,
+	sinkType string,
+	s serverutils.TestTenantInterface,
+	db *gosql.DB,
+	options feedTestOptions,
+) (factory cdctest.TestFeedFactory, sinkCleanup func()) {
+	t.Logf("making %s feed factory", sinkType)
+	switch sinkType {
+	case "kafka":
+		f := makeKafkaFeedFactory(s, db)
+		return f, func() {}
+	case "cloudstorage":
+		if options.externalIODir == "" {
+			t.Fatalf("expected externalIODir option to be set")
+		}
+		f := makeCloudFeedFactory(s, db, options.externalIODir)
+		return f, func() {}
+	case "enterprise":
+		sink, cleanup := sqlutils.PGUrl(t, s.SQLAddr(), t.Name(), url.User(username.RootUser))
+		f := makeTableFeedFactory(s, db, sink)
+		return f, cleanup
+	case "webhook":
+		f := makeWebhookFeedFactory(s, db)
+		return f, func() {}
+	case "pubsub":
+		f := makePubsubFeedFactory(s, db)
+		return f, func() {}
+	case "sinkless":
+		sink, cleanup := sqlutils.PGUrl(t, s.SQLAddr(), t.Name(), url.User(username.RootUser))
+		f := makeSinklessFeedFactory(s, sink)
+		return f, cleanup
+	}
+	t.Fatalf("unhandled sink type %s", sinkType)
+	return nil, nil
+}
+
+func cdcTest(t *testing.T, testFn cdcTestFn, testOpts ...feedTestOption) {
+	cdcTestNamed(t, "", testFn, testOpts...)
+}
+
+func cdcTestNamed(t *testing.T, name string, testFn cdcTestFn, testOpts ...feedTestOption) {
+	testFnWithSystem := func(t *testing.T, s TestServerWithSystem, f cdctest.TestFeedFactory) {
+		testFn(t, s.TestServer, f)
+	}
+	cdcTestNamedWithSystem(t, name, testFnWithSystem, testOpts...)
+}
+
+func cdcTestWithSystem(t *testing.T, testFn cdcTestWithSystemFn, testOpts ...feedTestOption) {
+	cdcTestNamedWithSystem(t, "", testFn, testOpts...)
+}
+
+func cdcTestNamedWithSystem(
+	t *testing.T, name string, testFn cdcTestWithSystemFn, testOpts ...feedTestOption,
+) {
+	t.Helper()
+	options := makeOptions(testOpts...)
+	cleanupCloudStorage := addCloudStorageOptions(t, &options)
+
+	sinkType := randomSinkTypeWithOptions(options)
+	testLabel := sinkType
+	if name != "" {
+		testLabel = fmt.Sprintf("%s/%s", sinkType, name)
+	}
+	t.Run(testLabel, func(t *testing.T) {
+		testServer, cleanupServer := makeServerWithOptions(t, options)
+		feedFactory, cleanupSink := makeFeedFactoryWithOptions(t, sinkType, testServer.Server, testServer.DB, options)
+		defer cleanupServer()
+		defer cleanupSink()
+		defer cleanupCloudStorage()
+
+		testFn(t, testServer, feedFactory)
+	})
+}
+
 func forceTableGC(
 	t testing.TB,
 	tsi serverutils.TestServerInterface,
diff --git a/pkg/ccl/changefeedccl/nemeses_test.go b/pkg/ccl/changefeedccl/nemeses_test.go
index 0e0bbbaa3f56..907bdd169b85 100644
--- a/pkg/ccl/changefeedccl/nemeses_test.go
+++ b/pkg/ccl/changefeedccl/nemeses_test.go
@@ -9,7 +9,6 @@
 package changefeedccl
 
 import (
-	gosql "database/sql"
 	"math"
 	"regexp"
 	"strings"
@@ -27,13 +26,13 @@ func TestChangefeedNemeses(t *testing.T) {
 	defer log.Scope(t).Close(t)
 	skip.UnderRace(t, "takes >1 min under race")
 
-	testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
-		sqlDB := sqlutils.MakeSQLRunner(db)
+	testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) {
+		sqlDB := sqlutils.MakeSQLRunner(s.DB)
 		disableDeclarativeSchemaChangesForTest(t, sqlDB)
 		// TODO(dan): Ugly hack to disable `eventPause` in sinkless feeds. See comment in
 		// `RunNemesis` for details.
isSinkless := strings.Contains(t.Name(), "sinkless") - v, err := cdctest.RunNemesis(f, db, isSinkless) + v, err := cdctest.RunNemesis(f, s.DB, isSinkless) if err != nil { t.Fatalf("%+v", err) } @@ -47,10 +46,7 @@ func TestChangefeedNemeses(t *testing.T) { // // nemeses_test.go:39: pq: unimplemented: operation is // unsupported in multi-tenancy mode - t.Run(`sinkless`, sinklessTest(testFn, feedTestNoTenants)) - t.Run(`enterprise`, enterpriseTest(testFn, feedTestNoTenants)) - t.Run(`cloudstorage`, cloudStorageTest(testFn, feedTestNoTenants)) - t.Run(`webhook`, webhookTest(testFn, feedTestNoTenants)) + cdcTest(t, testFn, feedTestNoTenants) log.Flush() entries, err := log.FetchEntriesFromFiles(0, math.MaxInt64, 1, regexp.MustCompile("cdc ux violation"), log.WithFlattenedSensitiveData) diff --git a/pkg/ccl/changefeedccl/show_changefeed_jobs_test.go b/pkg/ccl/changefeedccl/show_changefeed_jobs_test.go index c2d6ad164906..ba47ce410b55 100644 --- a/pkg/ccl/changefeedccl/show_changefeed_jobs_test.go +++ b/pkg/ccl/changefeedccl/show_changefeed_jobs_test.go @@ -10,7 +10,6 @@ package changefeedccl import ( "context" - gosql "database/sql" "fmt" "net/url" "sort" @@ -53,8 +52,8 @@ func TestShowChangefeedJobsBasic(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING)`) foo := feed(t, f, `CREATE CHANGEFEED FOR foo`) @@ -105,10 +104,9 @@ func TestShowChangefeedJobsBasic(t *testing.T) { require.Equal(t, "json", out.format, "Expected format:%s but found format:%s", "json", out.format) } - t.Run(`enterprise`, enterpriseTest(testFn)) - t.Run(`kafka`, kafkaTest(testFn)) - t.Run(`cloudstorage`, cloudStorageTest(testFn)) - t.Run(`pubsub`, pubsubTest(testFn)) + // TODO: Webhook disabled since the query parameters on the sinkURI are + // correct but out of order + cdcTest(t, testFn, feedTestOmitSinks("webhook", "sinkless")) } func TestShowChangefeedJobs(t *testing.T) { @@ -338,8 +336,8 @@ func TestShowChangefeedJobsAlterChangefeed(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING)`) sqlDB.Exec(t, `CREATE TABLE bar (a INT PRIMARY KEY)`) @@ -427,5 +425,6 @@ func TestShowChangefeedJobsAlterChangefeed(t *testing.T) { require.Equal(t, "json", out.format, "Expected format:%s but found format:%s", "json", out.format) } - t.Run(`kafka`, kafkaTest(testFn)) + // Force kafka to validate topics + cdcTest(t, testFn, feedTestForceSink("kafka")) } diff --git a/pkg/ccl/changefeedccl/testfeed_test.go b/pkg/ccl/changefeedccl/testfeed_test.go index e4ecabd90774..6bb055244668 100644 --- a/pkg/ccl/changefeedccl/testfeed_test.go +++ b/pkg/ccl/changefeedccl/testfeed_test.go @@ -54,14 +54,14 @@ import ( ) type sinklessFeedFactory struct { - s serverutils.TestServerInterface + s serverutils.TestTenantInterface sink url.URL } // makeSinklessFeedFactory returns a TestFeedFactory implementation using the // `experimental-sql` uri. 
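+// It now takes a TestTenantInterface so sinkless feeds can be created against
+// either the system tenant or a secondary tenant.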
func makeSinklessFeedFactory( - s serverutils.TestServerInterface, sink url.URL, + s serverutils.TestTenantInterface, sink url.URL, ) cdctest.TestFeedFactory { return &sinklessFeedFactory{s: s, sink: sink} } @@ -87,7 +87,7 @@ func (f *sinklessFeedFactory) Feed(create string, args ...interface{}) (cdctest. } // Server implements the TestFeedFactory interface. -func (f *sinklessFeedFactory) Server() serverutils.TestServerInterface { +func (f *sinklessFeedFactory) Server() serverutils.TestTenantInterface { return f.s } @@ -578,7 +578,7 @@ func (di *depInjector) getJobFeed(jobID jobspb.JobID) *jobFeed { } type enterpriseFeedFactory struct { - s serverutils.TestServerInterface + s serverutils.TestTenantInterface di *depInjector db *gosql.DB } @@ -600,7 +600,7 @@ type tableFeedFactory struct { // makeTableFeedFactory returns a TestFeedFactory implementation using the // `experimental-sql` uri. func makeTableFeedFactory( - srv serverutils.TestServerInterface, db *gosql.DB, sink url.URL, + srv serverutils.TestTenantInterface, db *gosql.DB, sink url.URL, ) cdctest.TestFeedFactory { return &tableFeedFactory{ enterpriseFeedFactory: enterpriseFeedFactory{ @@ -664,7 +664,7 @@ func (f *tableFeedFactory) Feed( } // Server implements the TestFeedFactory interface. -func (f *tableFeedFactory) Server() serverutils.TestServerInterface { +func (f *tableFeedFactory) Server() serverutils.TestTenantInterface { return f.s } @@ -781,7 +781,7 @@ type cloudFeedFactory struct { // makeCloudFeedFactory returns a TestFeedFactory implementation using the cloud // storage uri. func makeCloudFeedFactory( - srv serverutils.TestServerInterface, db *gosql.DB, dir string, + srv serverutils.TestTenantInterface, db *gosql.DB, dir string, ) cdctest.TestFeedFactory { return &cloudFeedFactory{ enterpriseFeedFactory: enterpriseFeedFactory{ @@ -838,7 +838,7 @@ func (f *cloudFeedFactory) Feed( } // Server implements the TestFeedFactory interface. -func (f *cloudFeedFactory) Server() serverutils.TestServerInterface { +func (f *cloudFeedFactory) Server() serverutils.TestTenantInterface { return f.s } @@ -889,7 +889,7 @@ func reformatJSON(j interface{}) ([]byte, error) { // extractKeyFromJSONValue extracts the `WITH key_in_value` key from a `WITH // format=json, envelope=wrapped` value. func extractKeyFromJSONValue(wrapped []byte) (key []byte, value []byte, _ error) { - parsed := make(map[string]interface{}) + parsed := make(map[string]gojson.RawMessage) if err := gojson.Unmarshal(wrapped, &parsed); err != nil { return nil, nil, err } @@ -1136,7 +1136,7 @@ var _ cdctest.TestFeedFactory = (*kafkaFeedFactory)(nil) // makeKafkaFeedFactory returns a TestFeedFactory implementation using the `kafka` uri. func makeKafkaFeedFactory( - srv serverutils.TestServerInterface, db *gosql.DB, + srv serverutils.TestTenantInterface, db *gosql.DB, ) cdctest.TestFeedFactory { return &kafkaFeedFactory{ enterpriseFeedFactory: enterpriseFeedFactory{ @@ -1244,7 +1244,7 @@ func (k *kafkaFeedFactory) Feed(create string, args ...interface{}) (cdctest.Tes } // Server implements TestFeedFactory -func (k *kafkaFeedFactory) Server() serverutils.TestServerInterface { +func (k *kafkaFeedFactory) Server() serverutils.TestTenantInterface { return k.s } @@ -1343,7 +1343,7 @@ var _ cdctest.TestFeedFactory = (*webhookFeedFactory)(nil) // makeWebhookFeedFactory returns a TestFeedFactory implementation using the `webhook-webhooks` uri. 
func makeWebhookFeedFactory( - srv serverutils.TestServerInterface, db *gosql.DB, + srv serverutils.TestTenantInterface, db *gosql.DB, ) cdctest.TestFeedFactory { useSecure := rand.Float32() < 0.5 return &webhookFeedFactory{ @@ -1418,7 +1418,7 @@ func (f *webhookFeedFactory) Feed(create string, args ...interface{}) (cdctest.T return c, nil } -func (f *webhookFeedFactory) Server() serverutils.TestServerInterface { +func (f *webhookFeedFactory) Server() serverutils.TestTenantInterface { return f.s } @@ -1449,24 +1449,24 @@ func isResolvedTimestamp(message []byte) (bool, error) { // extractTopicFromJSONValue extracts the `WITH topic_in_value` topic from a `WITH // format=json, envelope=wrapped` value. func extractTopicFromJSONValue(wrapped []byte) (topic string, value []byte, _ error) { - parsed := make(map[string]interface{}) - if err := gojson.Unmarshal(wrapped, &parsed); err != nil { + parsedValue := make(map[string]gojson.RawMessage) + if err := gojson.Unmarshal(wrapped, &parsedValue); err != nil { return "", nil, err } - topicParsed := parsed[`topic`] - delete(parsed, `topic`) - - topic = fmt.Sprintf("%v", topicParsed) + if err := gojson.Unmarshal(parsedValue[`topic`], &topic); err != nil { + return "", nil, err + } + delete(parsedValue, `topic`) var err error - if value, err = reformatJSON(parsed); err != nil { + if value, err = reformatJSON(parsedValue); err != nil { return "", nil, err } return topic, value, nil } type webhookSinkTestfeedPayload struct { - Payload []interface{} `json:"payload"` - Length int `json:"length"` + Payload []gojson.RawMessage `json:"payload"` + Length int `json:"length"` } // extractValueFromJSONMessage extracts the value of the first element of @@ -1641,7 +1641,7 @@ var _ cdctest.TestFeedFactory = (*pubsubFeedFactory)(nil) // makePubsubFeedFactory returns a TestFeedFactory implementation using the `pubsub` uri. 
func makePubsubFeedFactory( - srv serverutils.TestServerInterface, db *gosql.DB, + srv serverutils.TestTenantInterface, db *gosql.DB, ) cdctest.TestFeedFactory { return &pubsubFeedFactory{ enterpriseFeedFactory: enterpriseFeedFactory{ @@ -1701,7 +1701,7 @@ func (p *pubsubFeedFactory) Feed(create string, args ...interface{}) (cdctest.Te } // Server implements TestFeedFactory -func (p *pubsubFeedFactory) Server() serverutils.TestServerInterface { +func (p *pubsubFeedFactory) Server() serverutils.TestTenantInterface { return p.s } diff --git a/pkg/ccl/changefeedccl/validations_test.go b/pkg/ccl/changefeedccl/validations_test.go index cf47254cd719..114c96304fcb 100644 --- a/pkg/ccl/changefeedccl/validations_test.go +++ b/pkg/ccl/changefeedccl/validations_test.go @@ -30,22 +30,22 @@ func TestCatchupScanOrdering(t *testing.T) { defer log.Scope(t).Close(t) defer utilccl.TestingEnableEnterprise()() - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { t.Run("bank", func(t *testing.T) { ctx := context.Background() const numRows, numRanges, payloadBytes, maxTransfer = 10, 10, 10, 999 gen := bank.FromConfig(numRows, numRows, payloadBytes, numRanges) var l workloadsql.InsertsDataLoader - if _, err := workloadsql.Setup(ctx, db, gen, l); err != nil { + if _, err := workloadsql.Setup(ctx, s.DB, gen, l); err != nil { t.Fatal(err) } var nowString string - require.NoError(t, db.QueryRow("SELECT cluster_logical_timestamp()").Scan(&nowString)) + require.NoError(t, s.DB.QueryRow("SELECT cluster_logical_timestamp()").Scan(&nowString)) existingChangeCount := 50 for i := 0; i < existingChangeCount; i++ { - if err := randomBankTransfer(numRows, maxTransfer, db); err != nil { + if err := randomBankTransfer(numRows, maxTransfer, s.DB); err != nil { t.Fatal(err) } } @@ -61,7 +61,7 @@ func TestCatchupScanOrdering(t *testing.T) { return nil } - if err := randomBankTransfer(numRows, maxTransfer, db); err != nil { + if err := randomBankTransfer(numRows, maxTransfer, s.DB); err != nil { return err } } @@ -102,8 +102,7 @@ func TestCatchupScanOrdering(t *testing.T) { // validations_test.go:40: executing ALTER TABLE bank SPLIT AT // VALUES (5): pq: unimplemented: operation is unsupported in // multi-tenancy mode - t.Run(`sinkless`, sinklessTest(testFn, feedTestNoTenants)) - t.Run(`enterprise`, enterpriseTest(testFn, feedTestNoTenants)) + cdcTest(t, testFn, feedTestNoTenants) } // TODO(dan): This bit is copied from the bank workload. It's diff --git a/pkg/server/testserver.go b/pkg/server/testserver.go index ecf476d3524d..cd8efec2938c 100644 --- a/pkg/server/testserver.go +++ b/pkg/server/testserver.go @@ -652,6 +652,11 @@ func (t *TestTenant) DrainClients(ctx context.Context) error { return t.drain.drainClients(ctx, nil /* reporter */) } +// MustGetSQLCounter implements TestTenantInterface. +func (t *TestTenant) MustGetSQLCounter(name string) int64 { + return mustGetSQLCounterForRegistry(t.metricsRegistry, name) +} + // StartTenant starts a SQL tenant communicating with this TestServer. func (ts *TestServer) StartTenant( ctx context.Context, params base.TestTenantArgs, @@ -892,38 +897,9 @@ func (v *v2AuthDecorator) RoundTrip(r *http.Request) (*http.Response, error) { return v.RoundTripper.RoundTrip(r) } -// MustGetSQLCounter implements TestServerInterface. +// MustGetSQLCounter implements TestTenantInterface. 
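+// The lookup is shared with TestTenant via mustGetSQLCounterForRegistry,
+// which scans this server's metric registry.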
 func (ts *TestServer) MustGetSQLCounter(name string) int64 {
-	var c int64
-	var found bool
-
-	type (
-		int64Valuer  interface{ Value() int64 }
-		int64Counter interface{ Count() int64 }
-	)
-
-	ts.registry.Each(func(n string, v interface{}) {
-		if name == n {
-			switch t := v.(type) {
-			case *metric.Counter:
-				c = t.Count()
-				found = true
-			case *metric.Gauge:
-				c = t.Value()
-				found = true
-			case int64Valuer:
-				c = t.Value()
-				found = true
-			case int64Counter:
-				c = t.Count()
-				found = true
-			}
-		}
-	})
-	if !found {
-		panic(fmt.Sprintf("couldn't find metric %s", name))
-	}
-	return c
+	return mustGetSQLCounterForRegistry(ts.registry, name)
 }
 
 // MustGetSQLNetworkCounter implements TestServerInterface.
@@ -1422,3 +1398,36 @@ func (testServerFactoryImpl) New(params base.TestServerArgs) (interface{}, error
 
 	return ts, nil
 }
+
+func mustGetSQLCounterForRegistry(registry *metric.Registry, name string) int64 {
+	var c int64
+	var found bool
+
+	type (
+		int64Valuer  interface{ Value() int64 }
+		int64Counter interface{ Count() int64 }
+	)
+
+	registry.Each(func(n string, v interface{}) {
+		if name == n {
+			switch t := v.(type) {
+			case *metric.Counter:
+				c = t.Count()
+				found = true
+			case *metric.Gauge:
+				c = t.Value()
+				found = true
+			case int64Valuer:
+				c = t.Value()
+				found = true
+			case int64Counter:
+				c = t.Count()
+				found = true
+			}
+		}
+	})
+	if !found {
+		panic(fmt.Sprintf("couldn't find metric %s", name))
+	}
+	return c
+}
diff --git a/pkg/testutils/serverutils/test_tenant_shim.go b/pkg/testutils/serverutils/test_tenant_shim.go
index 9e724e27755f..4c798df1b470 100644
--- a/pkg/testutils/serverutils/test_tenant_shim.go
+++ b/pkg/testutils/serverutils/test_tenant_shim.go
@@ -136,6 +136,10 @@ type TestTenantInterface interface {
 	// SystemConfigProvider provides access to the system config.
 	SystemConfigProvider() config.SystemConfigProvider
 
+	// MustGetSQLCounter returns the value of a counter metric from the server's
+	// SQL Executor. Runs in O(# of metrics) time, which is fine for test code.
+	MustGetSQLCounter(name string) int64
+
 	// TODO(irfansharif): We'd benefit from an API to construct a *gosql.DB, or
 	// better yet, a *sqlutils.SQLRunner. We use it all the time, constructing
 	// it by hand each time.

From 509e4a01b047d69973715eab1f041af8f614d602 Mon Sep 17 00:00:00 2001
From: Michael Butler
Date: Fri, 27 May 2022 17:17:53 -0400
Subject: [PATCH 4/9] backupccl: display up to 10 missing files in `SHOW BACKUP
 .. with check_files`
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Previously, `SHOW BACKUP WITH check_files` displayed only the first missing
SST. This patch will display up to 10 missing SSTs. Further, this renames the
misleading `approximateTablePhysicalSize` to `approximateSpanPhysicalSize`.

Below I write out how physical table size is calculated:

1. Each range we back up maps to 1 to many spans (currently in the
   backup_manifest.files object).

2. 1 to many spans get written to an SST. No span will get written to
   multiple SSTs.

3. When the backup created these spans, it tried hard to split them at table
   boundaries so that only one table's data would be in a span, but a last
   minute table creation makes this near impossible, due to slow range
   splits. A big table will have many spans.

4. To compute the approximate logical size (called size_bytes in SHOW BACKUP)
   of each table, we sum the logical bytes over all its spans. We identify a
   table's span by checking the table prefix of the first key in the span.
   (See the getTableSizes method.)

5. To compute the physical size (file_bytes in SHOW BACKUP) of a span,
   compute the logical size of each SST by summing the logical bytes in the
   SST over its spans (see the getLogicalSSTSize method), and attribute a
   portion of the physical SST size (returned from cloud storage) to a span
   using the formula:

     physicalSpanSize = physicalSSTSize * logicalSpanSize / logicalSSTSize

   (the approximateSpanPhysicalSize method implements this). For example, a
   span holding 50 of an SST's 200 logical bytes is attributed a quarter of
   that SST's physical size.

6. To compute the physical size of a table, sum the physical sizes of the
   table's spans.

Release note (sql change): SHOW BACKUP WITH check_files will display up to 10
missing SSTs.
---
 pkg/ccl/backupccl/show.go      | 45 ++++++++++++++++----
 pkg/ccl/backupccl/show_test.go | 78 ++++++++++++++++++----------------
 2 files changed, 79 insertions(+), 44 deletions(-)

diff --git a/pkg/ccl/backupccl/show.go b/pkg/ccl/backupccl/show.go
index 898d50b11d84..fb80dc203877 100644
--- a/pkg/ccl/backupccl/show.go
+++ b/pkg/ccl/backupccl/show.go
@@ -11,6 +11,7 @@ package backupccl
 import (
 	"context"
 	"path"
+	"sort"
 	"strings"
 	"time"
 
@@ -486,6 +487,8 @@ func checkBackupFiles(
 	storeFactory cloud.ExternalStorageFromURIFactory,
 	user username.SQLUsername,
 ) ([][]int64, error) {
+	const maxMissingFiles = 10
+	missingFiles := make(map[string]struct{}, maxMissingFiles)
 	checkLayer := func(layer int) ([]int64, error) {
 		// TODO (msbutler): Right now, checkLayer opens stores for each backup layer. In 22.2,
@@ -546,10 +549,19 @@
 			}
 			sz, err := store.Size(ctx, f.Path)
 			if err != nil {
-				return nil, errors.Wrapf(err, "Error checking file %s in %s", f.Path, uri)
+				uriNoLocality := strings.Split(uri, "?")[0]
+				missingFile := path.Join(uriNoLocality, f.Path)
+				if _, ok := missingFiles[missingFile]; !ok {
+					missingFiles[missingFile] = struct{}{}
+					if maxMissingFiles == len(missingFiles) {
+						break
+					}
+				}
+				continue
 			}
 			fileSizes[i] = sz
 		}
+
 		return fileSizes, nil
 	}
 
@@ -559,8 +571,23 @@
 		if err != nil {
 			return nil, err
 		}
+		if len(missingFiles) == maxMissingFiles {
+			break
+		}
 		manifestFileSizes[layer] = layerFileSizes
 	}
+	if len(missingFiles) > 0 {
+		filesForMsg := make([]string, 0, len(missingFiles))
+		for file := range missingFiles {
+			filesForMsg = append(filesForMsg, file)
+		}
+		errorMsgPrefix := "The following files are missing from the backup:"
+		if len(missingFiles) == maxMissingFiles {
+			errorMsgPrefix = "Multiple files cannot be read from the backup including:"
+		}
+		sort.Strings(filesForMsg)
+		return nil, errors.Newf("%s\n\t%s", errorMsgPrefix, strings.Join(filesForMsg, "\n\t"))
+	}
 	return manifestFileSizes, nil
 }
 
@@ -828,11 +855,11 @@ func getLogicalSSTSize(files []backuppb.BackupManifest_File) map[string]int64 {
 	return sstDataSize
 }
 
-// approximateTablePhysicalSize approximates the number bytes written to disk for the table.
-func approximateTablePhysicalSize(
-	logicalTableSize int64, logicalFileSize int64, sstFileSize int64,
+// approximateSpanPhysicalSize approximates the number of bytes written to disk for the span.
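+// It attributes a share of the SST's physical size to the span in proportion
+// to the span's share of the SST's logical bytes:
+//
+//	physicalSpanSize = physicalSSTSize * logicalSpanSize / logicalSSTSize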
+func approximateSpanPhysicalSize( + logicalSpanSize int64, logicalSSTSize int64, physicalSSTSize int64, ) int64 { - return int64(float64(sstFileSize) * (float64(logicalTableSize) / float64(logicalFileSize))) + return int64(float64(physicalSSTSize) * (float64(logicalSpanSize) / float64(logicalSSTSize))) } // getTableSizes gathers row and size count for each table in the manifest @@ -868,7 +895,8 @@ func getTableSizes( s := tableSizes[descpb.ID(tableID)] s.rowCount.Add(file.EntryCounts) if len(fileSizes) > 0 { - s.fileSize = approximateTablePhysicalSize(s.rowCount.DataSize, logicalSSTSize[file.Path], fileSizes[i]) + s.fileSize += approximateSpanPhysicalSize(file.EntryCounts.DataSize, logicalSSTSize[file.Path], + fileSizes[i]) } tableSizes[descpb.ID(tableID)] = s } @@ -993,7 +1021,7 @@ func backupShowerFileSetup(inCol tree.StringOrPlaceholderOptList) backupShower { backupType = "incremental" } - sstDataSize := getLogicalSSTSize(manifest.Files) + logicalSSTSize := getLogicalSSTSize(manifest.Files) for j, file := range manifest.Files { filePath := file.Path if inCol != nil { @@ -1008,7 +1036,8 @@ func backupShowerFileSetup(inCol tree.StringOrPlaceholderOptList) backupShower { } sz := int64(-1) if len(info.fileSizes) > 0 { - sz = approximateTablePhysicalSize(info.fileSizes[i][j], file.EntryCounts.DataSize, sstDataSize[file.Path]) + sz = approximateSpanPhysicalSize(file.EntryCounts.DataSize, + logicalSSTSize[file.Path], info.fileSizes[i][j]) } rows = append(rows, tree.Datums{ tree.NewDString(filePath), diff --git a/pkg/ccl/backupccl/show_test.go b/pkg/ccl/backupccl/show_test.go index 0f6db6eadf66..a3a121a9d9a6 100644 --- a/pkg/ccl/backupccl/show_test.go +++ b/pkg/ccl/backupccl/show_test.go @@ -839,10 +839,13 @@ func TestShowBackupPathIsCollectionRoot(t *testing.T) { "SHOW BACKUP $1", localFoo) } -// TestShowBackupCheckFiles verifies the check_files option catches a corrupt backup file -// in 3 scenarios: 1. SST from a full backup; 2. SST from a default incremental backup; 3. -// SST from an incremental backup created with the incremental_location parameter. -// The first two scenarios also get checked with locality aware backups. +// TestShowBackupCheckFiles verifies the check_files option catches a corrupt +// backup file in 3 scenarios: 1. SST from a full backup; 2. SST from a default +// incremental backup; 3. SST from an incremental backup created with the +// incremental_location parameter. The first two scenarios also get checked with +// locality aware backups. The test also sanity checks the new file_bytes column +// in SHOW BACKUP with check_files, which displays the physical size of each +// table in the backup. 
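+// (file_bytes is derived from the SST physical sizes via
+// approximateSpanPhysicalSize in show.go.)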
 func TestShowBackupCheckFiles(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	defer log.Scope(t).Close(t)
@@ -855,8 +858,8 @@ func TestShowBackupCheckFiles(t *testing.T) {
 	collectionRoot := "full"
 	incLocRoot := "inc"
-	const c1, c2, c3 = `nodelocal://0/full`, `nodelocal://1/full`, `nodelocal://2/full`
-	const i1, i2, i3 = `nodelocal://0/inc`, `nodelocal://1/inc`, `nodelocal://2/inc`
+	const c1, c2, c3 = `nodelocal://1/full`, `nodelocal://2/full`, `nodelocal://3/full`
+	const i1, i2, i3 = `nodelocal://1/inc`, `nodelocal://2/inc`, `nodelocal://3/inc`
 	localities := []string{"default", "dc=dc1", "dc=dc2"}
 
 	collections := []string{
@@ -879,7 +882,6 @@ func TestShowBackupCheckFiles(t *testing.T) {
 		{dest: collections, inc: incrementals, localities: localities},
 	}
 
-	// create db
 	sqlDB.Exec(t, `CREATE DATABASE fkdb`)
 	sqlDB.Exec(t, `CREATE TABLE fkdb.fk (ind INT)`)
 
@@ -905,11 +907,23 @@ func TestShowBackupCheckFiles(t *testing.T) {
 
 		sqlDB.Exec(t, fmt.Sprintf("BACKUP DATABASE fkdb INTO LATEST IN %s", dest))
 
+		// breakCheckFiles validates that moving an SST will cause SHOW BACKUP with
+		// check_files to error.
 		breakCheckFiles := func(
+			// rootDir identifies the root of the collection or incremental backup dir
+			// of the target backup file.
 			rootDir string,
+
+			// backupDest contains the collection or incremental URIs.
 			backupDest []string,
+
+			// file contains the path to the target file, starting from the rootDir.
 			file string,
+
+			// fileLocality contains the expected locality of the target file.
 			fileLocality string,
+
+			// checkQuery contains the 'SHOW BACKUP with check_files' query.
 			checkQuery string) {
 
 			// Ensure no errors first.
@@ -921,65 +935,57 @@ func TestShowBackupCheckFiles(t *testing.T) {
 				require.NoError(t, err, "failed to corrupt SST")
 			}
 
-			// To validate the checkFiles error message, resolve the full path of the missing file.
-			// For locality aware backups, it may not live at the default URI (e.g. backupDest[0]).
-			var fileDest string
+			// If the file is from a locality aware backup, check its locality info. Note
+			// that locality aware URIs have the following structure:
+			// `someURI?COCKROACH_LOCALITY='locality'`.
 			if fileLocality == "NULL" {
-				fileDest = backupDest[0]
 			} else {
+				var locality string
 				fileLocality = url.QueryEscape(fileLocality)
 				for _, destURI := range backupDest {
-					// Locality aware URIs have the following structure:
-					// `someURI?COCKROACH_LOCALITY='locality'`. Given the fileLocality, we
-					// match for the proper URI.
+					// Using the locality, match for the proper URI.
 					destLocality := strings.Split(destURI, "?")
 					if strings.Contains(destLocality[1], fileLocality) {
-						fileDest = destURI
+						locality = destLocality[1]
 						break
 					}
 				}
+				require.NotEmpty(t, locality, "could not find file locality")
 			}
-			require.NotEmpty(t, fileDest, "could not find file locality")
-
-			// The full error message looks like "Error checking file
-			// data/756930828574818306.sst in nodelocal://0/full/2022/04/27-134916.90".
-			//
-			// Note that the expected error message excludes the path to the data file
-			// to avoid a test flake for locality aware backups where two different
-			// nodelocal URI's read to the same place. In this scenario, the test
-			// expects the backup to be in nodelocal://1/foo and the actual error
-			// message resolves the uri to nodelocal://0/foo. While both are correct,
-			// the test fails.
- - // Get Path after /data dir - toFile := "data" + strings.Split(file, "/data")[1] - errorMsg := fmt.Sprintf("Error checking file %s", toFile) + + // Note that the expected error message excludes the nodelocal portion of + // the file path (nodelocal://1/) to avoid a test flake for locality aware + // backups where two different nodelocal URI's read to the same place. In + // this scenario, the test expects the backup to be in nodelocal://2/foo + // and the actual error message resolves the uri to nodelocal://1/foo. + // While both are correct, the test fails. + errorMsg := fmt.Sprintf("The following files are missing from the backup:\n\t.*%s", + filepath.Join(rootDir, file)) sqlDB.ExpectErr(t, errorMsg, checkQuery) if err := os.Rename(badPath, fullPath); err != nil { require.NoError(t, err, "failed to de-corrupt SST") } } - fileInfo := sqlDB.QueryStr(t, fmt.Sprintf(`SELECT path, locality, file_bytes FROM [SHOW BACKUP FILES FROM LATEST IN %s with check_files]`, dest)) checkQuery := fmt.Sprintf(`SHOW BACKUP FROM LATEST IN %s WITH check_files`, dest) - // break on full backup + // Break on full backup. breakCheckFiles(collectionRoot, test.dest, fileInfo[0][0], fileInfo[0][1], checkQuery) - // break on default inc backup + // Break on default inc backup. breakCheckFiles(collectionRoot, test.dest, fileInfo[len(fileInfo)-1][0], fileInfo[len(fileInfo)-1][1], checkQuery) - // check that each file size is positive + // Check that each file size is positive. for _, file := range fileInfo { sz, err := strconv.Atoi(file[2]) require.NoError(t, err, "could not get file size") require.Greater(t, sz, 0, "file size is not positive") } - // check that returned file size is consistent across flavors of SHOW BACKUP + // Check that the returned file size is consistent across flavors of SHOW BACKUP. fileSum := sqlDB.QueryStr(t, fmt.Sprintf(`SELECT sum(file_bytes) FROM [SHOW BACKUP FILES FROM LATEST IN %s with check_files]`, dest)) @@ -988,7 +994,7 @@ func TestShowBackupCheckFiles(t *testing.T) { fileSum) if len(test.dest) == 1 { - // break on an incremental backup stored at incremental_location + // Break on an incremental backup stored at incremental_location. fileInfo := sqlDB.QueryStr(t, fmt.Sprintf(`SELECT path, locality FROM [SHOW BACKUP FILES FROM LATEST IN %s WITH incremental_location = %s]`, dest, inc)) From c28f1eee3de37096432fedd5bfc72836e33746a1 Mon Sep 17 00:00:00 2001 From: Erik Grinaker Date: Sat, 4 Jun 2022 10:24:54 +0000 Subject: [PATCH 5/9] storage: tweak `newMVCCIterator()` This patch tweaks `newMVCCIterator()` for use with MVCC range tombstones, and uses it for all appropriate MVCC operations. Release note: None --- pkg/storage/mvcc.go | 56 +++++++++++++++++++++++++++++++-------------- 1 file changed, 39 insertions(+), 17 deletions(-) diff --git a/pkg/storage/mvcc.go b/pkg/storage/mvcc.go index faaf4b8048c2..eec9da75c4b2 100644 --- a/pkg/storage/mvcc.go +++ b/pkg/storage/mvcc.go @@ -708,12 +708,26 @@ func (opts *MVCCGetOptions) validate() error { return nil } -func newMVCCIterator(reader Reader, inlineMeta bool, opts IterOptions) MVCCIterator { - iterKind := MVCCKeyAndIntentsIterKind - if inlineMeta { - iterKind = MVCCKeyIterKind - } - return reader.NewMVCCIterator(iterKind, opts) +// newMVCCIterator sets up a suitable iterator for high-level MVCC operations +// operating at the given timestamp. If timestamp is empty, the iterator is +// considered to be used for inline values, disabling intents and range keys. 
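+// (Inline values are stored without MVCC timestamps, so neither intents nor
+// range keys can apply to them.)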
+// If rangeKeyMasking is true, IterOptions.RangeKeyMaskingBelow is set to the +// given timestamp. +func newMVCCIterator( + reader Reader, timestamp hlc.Timestamp, rangeKeyMasking bool, opts IterOptions, +) MVCCIterator { + // If reading inline then just return a plain MVCCIterator without intents. + // We also disable range keys, since they're not allowed across inline values. + if timestamp.IsEmpty() { + opts.KeyTypes = IterKeyTypePointsOnly + return reader.NewMVCCIterator(MVCCKeyIterKind, opts) + } + // Enable range key masking if requested. + if rangeKeyMasking && opts.KeyTypes != IterKeyTypePointsOnly && + opts.RangeKeyMaskingBelow.IsEmpty() { + opts.RangeKeyMaskingBelow = timestamp + } + return reader.NewMVCCIterator(MVCCKeyAndIntentsIterKind, opts) } // MVCCGet returns the most recent value for the specified key whose timestamp @@ -743,7 +757,7 @@ func newMVCCIterator(reader Reader, inlineMeta bool, opts IterOptions) MVCCItera func MVCCGet( ctx context.Context, reader Reader, key roachpb.Key, timestamp hlc.Timestamp, opts MVCCGetOptions, ) (*roachpb.Value, *roachpb.Intent, error) { - iter := newMVCCIterator(reader, timestamp.IsEmpty(), IterOptions{Prefix: true}) + iter := newMVCCIterator(reader, timestamp, !opts.Tombstones, IterOptions{Prefix: true}) defer iter.Close() value, intent, err := mvccGet(ctx, iter, key, timestamp, opts) return value.ToPointer(), intent, err @@ -1024,7 +1038,7 @@ func MVCCPut( var iter MVCCIterator blind := ms == nil && timestamp.IsEmpty() if !blind { - iter = rw.NewMVCCIterator(MVCCKeyAndIntentsIterKind, IterOptions{Prefix: true}) + iter = newMVCCIterator(rw, timestamp, false /* rangeKeyMasking */, IterOptions{Prefix: true}) defer iter.Close() } return mvccPutUsingIter(ctx, rw, iter, ms, key, timestamp, localTimestamp, value, txn, nil) @@ -1068,7 +1082,7 @@ func MVCCDelete( localTimestamp hlc.ClockTimestamp, txn *roachpb.Transaction, ) error { - iter := newMVCCIterator(rw, timestamp.IsEmpty(), IterOptions{Prefix: true}) + iter := newMVCCIterator(rw, timestamp, false /* rangeKeyMasking */, IterOptions{Prefix: true}) defer iter.Close() return mvccPutUsingIter(ctx, rw, iter, ms, key, timestamp, localTimestamp, noValue, txn, nil) @@ -1732,7 +1746,7 @@ func MVCCIncrement( txn *roachpb.Transaction, inc int64, ) (int64, error) { - iter := newMVCCIterator(rw, timestamp.IsEmpty(), IterOptions{Prefix: true}) + iter := newMVCCIterator(rw, timestamp, false /* rangeKeyMasking */, IterOptions{Prefix: true}) defer iter.Close() var int64Val int64 @@ -1806,7 +1820,7 @@ func MVCCConditionalPut( allowIfDoesNotExist CPutMissingBehavior, txn *roachpb.Transaction, ) error { - iter := newMVCCIterator(rw, timestamp.IsEmpty(), IterOptions{Prefix: true}) + iter := newMVCCIterator(rw, timestamp, false /* rangeKeyMasking */, IterOptions{Prefix: true}) defer iter.Close() return mvccConditionalPutUsingIter( @@ -1888,7 +1902,7 @@ func MVCCInitPut( failOnTombstones bool, txn *roachpb.Transaction, ) error { - iter := newMVCCIterator(rw, timestamp.IsEmpty(), IterOptions{Prefix: true}) + iter := newMVCCIterator(rw, timestamp, false /* rangeKeyMasking */, IterOptions{Prefix: true}) defer iter.Close() return mvccInitPutUsingIter(ctx, rw, iter, ms, key, timestamp, localTimestamp, value, failOnTombstones, txn) } @@ -2269,7 +2283,7 @@ func MVCCDeleteRange( buf := newPutBuffer() defer buf.release() - iter := newMVCCIterator(rw, timestamp.IsEmpty(), IterOptions{Prefix: true}) + iter := newMVCCIterator(rw, timestamp, false /* rangeKeyMasking */, IterOptions{Prefix: true}) defer iter.Close() var 
keys []roachpb.Key @@ -2573,7 +2587,10 @@ func MVCCScan( timestamp hlc.Timestamp, opts MVCCScanOptions, ) (MVCCScanResult, error) { - iter := newMVCCIterator(reader, timestamp.IsEmpty(), IterOptions{LowerBound: key, UpperBound: endKey}) + iter := newMVCCIterator(reader, timestamp, !opts.Tombstones, IterOptions{ + LowerBound: key, + UpperBound: endKey, + }) defer iter.Close() return mvccScanToKvs(ctx, iter, key, endKey, timestamp, opts) } @@ -2586,7 +2603,10 @@ func MVCCScanToBytes( timestamp hlc.Timestamp, opts MVCCScanOptions, ) (MVCCScanResult, error) { - iter := newMVCCIterator(reader, timestamp.IsEmpty(), IterOptions{LowerBound: key, UpperBound: endKey}) + iter := newMVCCIterator(reader, timestamp, !opts.Tombstones, IterOptions{ + LowerBound: key, + UpperBound: endKey, + }) defer iter.Close() return mvccScanToBytes(ctx, iter, key, endKey, timestamp, opts) } @@ -2629,8 +2649,10 @@ func MVCCIterate( opts MVCCScanOptions, f func(roachpb.KeyValue) error, ) ([]roachpb.Intent, error) { - iter := newMVCCIterator( - reader, timestamp.IsEmpty(), IterOptions{LowerBound: key, UpperBound: endKey}) + iter := newMVCCIterator(reader, timestamp, !opts.Tombstones, IterOptions{ + LowerBound: key, + UpperBound: endKey, + }) defer iter.Close() var intents []roachpb.Intent From dc80d3cce7f89896bd81c32ac8caf0be094cb961 Mon Sep 17 00:00:00 2001 From: richardjcai Date: Wed, 1 Jun 2022 11:26:06 -0400 Subject: [PATCH 6/9] kv, gossip: remove misc deprecated system config code There is still some work left to actually remove `SystemConfigSpan` or at least update `SystemConfigSpan` to contain just system.descriptor and system.zones. https://github.com/cockroachdb/cockroach/pull/76279 Release note: None --- pkg/config/system.go | 3 + pkg/gossip/gossip.go | 31 ---- pkg/kv/kvclient/kvcoord/txn_coord_sender.go | 26 ---- .../kvcoord/txn_interceptor_committer_test.go | 4 +- .../kvserver/batcheval/cmd_end_transaction.go | 37 ----- .../batcheval/cmd_end_transaction_test.go | 4 +- pkg/kv/kvserver/client_lease_test.go | 132 ------------------ pkg/kv/kvserver/client_replica_test.go | 9 +- pkg/kv/kvserver/gossip_test.go | 96 ------------- pkg/kv/kvserver/mvcc_gc_queue_test.go | 12 +- pkg/kv/kvserver/queue_test.go | 15 +- pkg/kv/kvserver/replica.go | 12 -- pkg/kv/kvserver/replica_gossip.go | 86 ------------ pkg/kv/kvserver/replica_proposal.go | 23 --- pkg/kv/kvserver/replica_test.go | 77 +--------- pkg/kv/kvserver/split_queue_test.go | 6 +- pkg/kv/kvserver/store.go | 2 +- pkg/kv/mock_transactional_sender.go | 6 - pkg/kv/sender.go | 11 -- pkg/kv/txn.go | 44 +----- pkg/kv/txn_test.go | 21 --- pkg/roachpb/data.go | 2 - pkg/roachpb/data.proto | 2 +- pkg/server/BUILD.bazel | 2 - pkg/server/server_test.go | 111 --------------- pkg/spanconfig/spanconfigstore/store.go | 1 + pkg/sql/catalog/descs/txn.go | 10 -- pkg/sql/gcjob/BUILD.bazel | 1 - pkg/sql/gcjob/descriptor_utils.go | 8 -- pkg/sql/opt/exec/execbuilder/BUILD.bazel | 1 - pkg/sql/opt/exec/execbuilder/relational.go | 18 --- pkg/sql/opt_exec_factory.go | 24 ---- pkg/sql/plan.go | 15 -- pkg/sql/schema_changer.go | 7 - pkg/sql/tests/BUILD.bazel | 3 - pkg/sql/tests/end_txn_trigger.go | 68 --------- pkg/sql/tests/server_params.go | 1 - pkg/sql/txn_restart_test.go | 1 - 38 files changed, 37 insertions(+), 895 deletions(-) delete mode 100644 pkg/sql/tests/end_txn_trigger.go diff --git a/pkg/config/system.go b/pkg/config/system.go index 2a38770400f7..1283a7ae7cbf 100644 --- a/pkg/config/system.go +++ b/pkg/config/system.go @@ -81,6 +81,9 @@ type zoneEntry struct { // saying whether or 
not it should be considered for splitting at all. // A database descriptor or a table view descriptor are examples of IDs // that should not be considered for splits. +// NB: SystemConfig can be updated to only contain system.descriptor and +// system.zones. We still need SystemConfig for SystemConfigProvider which is +// used in replication reports and the opt catalog. type SystemConfig struct { SystemConfigEntries DefaultZoneConfig *zonepb.ZoneConfig diff --git a/pkg/gossip/gossip.go b/pkg/gossip/gossip.go index 97c7f117d8c0..8160efbd4c65 100644 --- a/pkg/gossip/gossip.go +++ b/pkg/gossip/gossip.go @@ -1145,37 +1145,6 @@ func (g *Gossip) RegisterCallback(pattern string, method Callback, opts ...Callb } } -// DeprecatedGetSystemConfig returns the local unmarshaled version of the system config. -// Returns nil if the system config hasn't been set yet. -// -// TODO(ajwerner): Remove this in 22.2. -func (g *Gossip) DeprecatedGetSystemConfig() *config.SystemConfig { - g.systemConfigMu.RLock() - defer g.systemConfigMu.RUnlock() - return g.systemConfig -} - -// DeprecatedRegisterSystemConfigChannel registers a channel to signify updates for the -// system config. It is notified after registration (if a system config is -// already set), and whenever a new system config is successfully unmarshaled. -// -// TODO(ajwerner): Remove this in 22.2. -func (g *Gossip) DeprecatedRegisterSystemConfigChannel() <-chan struct{} { - // Create channel that receives new system config notifications. - // The channel has a size of 1 to prevent gossip from having to block on it. - c := make(chan struct{}, 1) - - g.systemConfigMu.Lock() - defer g.systemConfigMu.Unlock() - g.systemConfigChannels = append(g.systemConfigChannels, c) - - // Notify the channel right away if we have a config. - if g.systemConfig != nil { - c <- struct{}{} - } - return c -} - // updateSystemConfig is the raw gossip info callback. Unmarshal the // system config, and if successful, send on each system config // channel. diff --git a/pkg/kv/kvclient/kvcoord/txn_coord_sender.go b/pkg/kv/kvclient/kvcoord/txn_coord_sender.go index cb6f71799d48..5fcd8c96fc52 100644 --- a/pkg/kv/kvclient/kvcoord/txn_coord_sender.go +++ b/pkg/kv/kvclient/kvcoord/txn_coord_sender.go @@ -11,12 +11,10 @@ package kvcoord import ( - "bytes" "context" "fmt" "runtime/debug" - "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/storage/enginepb" @@ -984,30 +982,6 @@ func sanityCheckErrWithTxn( return err } -// setTxnAnchorKey sets the key at which to anchor the transaction record. The -// transaction anchor key defaults to the first key written in a transaction. -func (tc *TxnCoordSender) setTxnAnchorKeyLocked(key roachpb.Key) error { - if len(tc.mu.txn.Key) != 0 { - return errors.Errorf("transaction anchor key already set") - } - tc.mu.txn.Key = key - return nil -} - -// AnchorOnSystemConfigRange is part of the client.TxnSender interface. -func (tc *TxnCoordSender) AnchorOnSystemConfigRange() error { - tc.mu.Lock() - defer tc.mu.Unlock() - // Allow this to be called more than once. - if bytes.Equal(tc.mu.txn.Key, keys.SystemConfigSpan.Key) { - return nil - } - // The system-config trigger must be run on the system-config range which - // means any transaction with the trigger set needs to be anchored to the - // system-config range. - return tc.setTxnAnchorKeyLocked(keys.SystemConfigSpan.Key) -} - // TxnStatus is part of the client.TxnSender interface. 
func (tc *TxnCoordSender) TxnStatus() roachpb.TransactionStatus { tc.mu.Lock() diff --git a/pkg/kv/kvclient/kvcoord/txn_interceptor_committer_test.go b/pkg/kv/kvclient/kvcoord/txn_interceptor_committer_test.go index 52a132054af0..c0a58c820536 100644 --- a/pkg/kv/kvclient/kvcoord/txn_interceptor_committer_test.go +++ b/pkg/kv/kvclient/kvcoord/txn_interceptor_committer_test.go @@ -243,9 +243,7 @@ func TestTxnCommitterStripsInFlightWrites(t *testing.T) { ba.Requests = nil etArgsWithTrigger := etArgs etArgsWithTrigger.InternalCommitTrigger = &roachpb.InternalCommitTrigger{ - ModifiedSpanTrigger: &roachpb.ModifiedSpanTrigger{ - SystemConfigSpan: true, - }, + ModifiedSpanTrigger: &roachpb.ModifiedSpanTrigger{NodeLivenessSpan: &roachpb.Span{}}, } ba.Add(&putArgs, &qiArgs, &etArgsWithTrigger) diff --git a/pkg/kv/kvserver/batcheval/cmd_end_transaction.go b/pkg/kv/kvserver/batcheval/cmd_end_transaction.go index 734c8b3065d3..76f975c4b892 100644 --- a/pkg/kv/kvserver/batcheval/cmd_end_transaction.go +++ b/pkg/kv/kvserver/batcheval/cmd_end_transaction.go @@ -18,7 +18,6 @@ import ( "sync/atomic" "time" - "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/abortspan" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/batcheval/result" @@ -425,17 +424,6 @@ func EndTxn( if err := txnResult.MergeAndDestroy(triggerResult); err != nil { return result.Result{}, err } - } else if reply.Txn.Status == roachpb.ABORTED { - // If this is the system config span and we're aborted, add a trigger to - // potentially gossip now that we've removed an intent. This is important - // to deal with cases where previously committed values were not gossipped - // due to an outstanding intent. - if cArgs.EvalCtx.ContainsKey(keys.SystemConfigSpan.Key) && - !cArgs.EvalCtx.ClusterSettings().Version.IsActive( - ctx, clusterversion.DisableSystemConfigGossipTrigger, - ) { - txnResult.Local.MaybeGossipSystemConfigIfHaveFailure = true - } } return txnResult, nil @@ -691,31 +679,6 @@ func RunCommitTrigger( } if ct.GetModifiedSpanTrigger() != nil { var pd result.Result - if ct.ModifiedSpanTrigger.SystemConfigSpan { - // Check if we need to gossip the system config. - // NOTE: System config gossiping can only execute correctly if - // the transaction record is located on the range that contains - // the system span. If a transaction is created which modifies - // both system *and* non-system data, it should be ensured that - // the transaction record itself is on the system span. This can - // be done by making sure a system key is the first key touched - // in the transaction. - if rec.ContainsKey(keys.SystemConfigSpan.Key) { - if err := pd.MergeAndDestroy( - result.Result{ - Local: result.LocalResult{ - MaybeGossipSystemConfig: true, - }, - }, - ); err != nil { - return result.Result{}, err - } - } else { - log.Errorf(ctx, "System configuration span was modified, but the "+ - "modification trigger is executing on a non-system range. 
"+ - "Configuration changes will not be gossiped.") - } - } if nlSpan := ct.ModifiedSpanTrigger.NodeLivenessSpan; nlSpan != nil { if err := pd.MergeAndDestroy( result.Result{ diff --git a/pkg/kv/kvserver/batcheval/cmd_end_transaction_test.go b/pkg/kv/kvserver/batcheval/cmd_end_transaction_test.go index 923b91a015cf..fa90387cba91 100644 --- a/pkg/kv/kvserver/batcheval/cmd_end_transaction_test.go +++ b/pkg/kv/kvserver/batcheval/cmd_end_transaction_test.go @@ -1154,7 +1154,7 @@ func TestCommitWaitBeforeIntentResolutionIfCommitTrigger(t *testing.T) { } if commitTrigger { req.InternalCommitTrigger = &roachpb.InternalCommitTrigger{ - ModifiedSpanTrigger: &roachpb.ModifiedSpanTrigger{SystemConfigSpan: true}, + ModifiedSpanTrigger: &roachpb.ModifiedSpanTrigger{NodeLivenessSpan: &roachpb.Span{}}, } } var resp roachpb.EndTxnResponse @@ -1175,7 +1175,7 @@ func TestCommitWaitBeforeIntentResolutionIfCommitTrigger(t *testing.T) { if cfg.expError { require.Error(t, err) - require.Regexp(t, `txn .* with modified-span \(system-config\) commit trigger needs commit wait`, err) + require.Regexp(t, `txn .* with modified-span \(node-liveness\) commit trigger needs commit wait`, err) } else { require.NoError(t, err) } diff --git a/pkg/kv/kvserver/client_lease_test.go b/pkg/kv/kvserver/client_lease_test.go index 89a03e6cee07..b490ce628f67 100644 --- a/pkg/kv/kvserver/client_lease_test.go +++ b/pkg/kv/kvserver/client_lease_test.go @@ -22,8 +22,6 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/clusterversion" - "github.com/cockroachdb/cockroach/pkg/config" "github.com/cockroachdb/cockroach/pkg/config/zonepb" "github.com/cockroachdb/cockroach/pkg/gossip" "github.com/cockroachdb/cockroach/pkg/keys" @@ -31,7 +29,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/allocator/storepool" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverbase" - "github.com/cockroachdb/cockroach/pkg/kv/kvserver/liveness/livenesspb" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/server" "github.com/cockroachdb/cockroach/pkg/settings/cluster" @@ -96,135 +93,6 @@ func TestStoreRangeLease(t *testing.T) { } } -// TestStoreGossipSystemData verifies that the system-config and node-liveness -// data is gossiped at startup in the mixed version state. -// -// TODO(ajwerner): Delete this test in 22.2. 
-func TestStoreGossipSystemData(t *testing.T) { - defer leaktest.AfterTest(t)() - defer log.Scope(t).Close(t) - - zcfg := zonepb.DefaultZoneConfig() - version := clusterversion.ByKey(clusterversion.DisableSystemConfigGossipTrigger - 1) - settings := cluster.MakeTestingClusterSettingsWithVersions( - version, version, false, /* initializeVersion */ - ) - serverArgs := base.TestServerArgs{ - Settings: settings, - Knobs: base.TestingKnobs{ - Store: &kvserver.StoreTestingKnobs{ - DisableMergeQueue: true, - }, - Server: &server.TestingKnobs{ - DefaultZoneConfigOverride: &zcfg, - BinaryVersionOverride: version, - DisableAutomaticVersionUpgrade: make(chan struct{}), - }, - }, - } - tc := testcluster.StartTestCluster(t, 1, - base.TestClusterArgs{ - ReplicationMode: base.ReplicationManual, - ServerArgs: serverArgs, - }, - ) - defer tc.Stopper().Stop(context.Background()) - - store := tc.GetFirstStoreFromServer(t, 0) - splitKey := keys.SystemConfigSplitKey - tc.SplitRangeOrFatal(t, splitKey) - if _, err := store.DB().Inc(context.Background(), splitKey, 1); err != nil { - t.Fatalf("failed to increment: %+v", err) - } - - getSystemConfig := func(s *kvserver.Store) *config.SystemConfig { - systemConfig := s.Gossip().DeprecatedGetSystemConfig() - return systemConfig - } - getNodeLiveness := func(s *kvserver.Store) livenesspb.Liveness { - var liveness livenesspb.Liveness - if err := s.Gossip().GetInfoProto(gossip.MakeNodeLivenessKey(1), &liveness); err == nil { - return liveness - } - return livenesspb.Liveness{} - } - - // Restart the store and verify that both the system-config and node-liveness - // data is gossiped. - tc.AddAndStartServer(t, serverArgs) - tc.StopServer(0) - - testutils.SucceedsSoon(t, func() error { - if !getSystemConfig(tc.GetFirstStoreFromServer(t, 1)).DefaultZoneConfig.Equal(zcfg) { - return errors.New("system config not gossiped") - } - if getNodeLiveness(tc.GetFirstStoreFromServer(t, 1)) == (livenesspb.Liveness{}) { - return errors.New("node liveness not gossiped") - } - return nil - }) -} - -// TestGossipSystemConfigOnLeaseChange verifies that the system-config gets -// re-gossiped on lease transfer even if it hasn't changed. This helps prevent -// situations where a previous leaseholder can restart and not receive the -// system config because it was the original source of it within the gossip -// network. This test only applies in the mixed version state. -// -// TODO(ajwerner): Remove this test in 22.2. 
-func TestGossipSystemConfigOnLeaseChange(t *testing.T) { - defer leaktest.AfterTest(t)() - defer log.Scope(t).Close(t) - - const numStores = 3 - tc := testcluster.StartTestCluster(t, numStores, - base.TestClusterArgs{ - ReplicationMode: base.ReplicationManual, - ServerArgs: base.TestServerArgs{ - Knobs: base.TestingKnobs{ - Store: &kvserver.StoreTestingKnobs{ - DisableMergeQueue: true, - }, - Server: &server.TestingKnobs{ - BinaryVersionOverride: clusterversion.ByKey( - clusterversion.DisableSystemConfigGossipTrigger - 1, - ), - DisableAutomaticVersionUpgrade: make(chan struct{}), - }, - }, - }, - }, - ) - defer tc.Stopper().Stop(context.Background()) - - key := keys.SystemConfigSpan.Key - tc.AddVotersOrFatal(t, key, tc.Target(1), tc.Target(2)) - - initialStoreIdx := -1 - for i := range tc.Servers { - if tc.GetFirstStoreFromServer(t, i).Gossip().InfoOriginatedHere(gossip.KeyDeprecatedSystemConfig) { - initialStoreIdx = i - } - } - if initialStoreIdx == -1 { - t.Fatalf("no store has gossiped system config; gossip contents: %+v", tc.GetFirstStoreFromServer(t, 0).Gossip().GetInfoStatus()) - } - - newStoreIdx := (initialStoreIdx + 1) % numStores - if err := tc.TransferRangeLease(tc.LookupRangeOrFatal(t, key), tc.Target(newStoreIdx)); err != nil { - t.Fatalf("Unexpected error %v", err) - } - testutils.SucceedsSoon(t, func() error { - if tc.GetFirstStoreFromServer(t, initialStoreIdx).Gossip().InfoOriginatedHere(gossip.KeyDeprecatedSystemConfig) { - return errors.New("system config still most recently gossiped by original leaseholder") - } - if !tc.GetFirstStoreFromServer(t, newStoreIdx).Gossip().InfoOriginatedHere(gossip.KeyDeprecatedSystemConfig) { - return errors.New("system config not most recently gossiped by new leaseholder") - } - return nil - }) -} - func TestGossipNodeLivenessOnLeaseChange(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) diff --git a/pkg/kv/kvserver/client_replica_test.go b/pkg/kv/kvserver/client_replica_test.go index d46f34a312b7..8ba63652fe75 100644 --- a/pkg/kv/kvserver/client_replica_test.go +++ b/pkg/kv/kvserver/client_replica_test.go @@ -3790,10 +3790,6 @@ func TestStrictGCEnforcement(t *testing.T) { s := tc.Server(i) _, r := getFirstStoreReplica(t, s, tableKey) if c := r.SpanConfig(); c.TTL().Seconds() != (time.Duration(exp) * time.Second).Seconds() { - _, sysCfg := getFirstStoreReplica(t, tc.Server(i), keys.SystemConfigSpan.Key) - sysCfg.RaftLock() - require.NoError(t, sysCfg.MaybeGossipSystemConfigRaftMuLocked(ctx)) - sysCfg.RaftUnlock() return errors.Errorf("expected %d, got %f", exp, c.TTL().Seconds()) } } @@ -3805,11 +3801,8 @@ func TestStrictGCEnforcement(t *testing.T) { sqlDB.Exec(t, `SET CLUSTER SETTING kv.gc_ttl.strict_enforcement.enabled = `+fmt.Sprint(val)) testutils.SucceedsSoon(t, func() error { for i := 0; i < tc.NumServers(); i++ { - s, r := getFirstStoreReplica(t, tc.Server(i), keys.SystemConfigSpan.Key) + s, _ := getFirstStoreReplica(t, tc.Server(i), keys.SystemConfigSpan.Key) if kvserver.StrictGCEnforcement.Get(&s.ClusterSettings().SV) != val { - r.RaftLock() - require.NoError(t, r.MaybeGossipSystemConfigRaftMuLocked(ctx)) - r.RaftUnlock() return errors.Errorf("expected %v, got %v", val, !val) } } diff --git a/pkg/kv/kvserver/gossip_test.go b/pkg/kv/kvserver/gossip_test.go index 4abc0f22810b..a5e833dd6041 100644 --- a/pkg/kv/kvserver/gossip_test.go +++ b/pkg/kv/kvserver/gossip_test.go @@ -18,15 +18,9 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/clusterversion" 
"github.com/cockroachdb/cockroach/pkg/gossip" "github.com/cockroachdb/cockroach/pkg/keys" - "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/roachpb" - "github.com/cockroachdb/cockroach/pkg/security/username" - "github.com/cockroachdb/cockroach/pkg/server" - "github.com/cockroachdb/cockroach/pkg/settings/cluster" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/dbdesc" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/skip" "github.com/cockroachdb/cockroach/pkg/testutils/testcluster" @@ -34,7 +28,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/retry" - "github.com/stretchr/testify/require" ) func TestGossipFirstRange(t *testing.T) { @@ -211,92 +204,3 @@ func TestGossipHandlesReplacedNode(t *testing.T) { } } } - -// TestGossipAfterAbortOfSystemConfigTransactionAfterFailureDueToIntents tests -// that failures to gossip the system config due to intents are rectified when -// later intents are aborted. -// -// Note that this tests the gossip functionality only in the mixed version -// state. After the release is finalized, these gossip triggers will no longer -// happen. -// -// TODO(ajwerner): Delete this test in 22.2. -func TestGossipAfterAbortOfSystemConfigTransactionAfterFailureDueToIntents(t *testing.T) { - defer leaktest.AfterTest(t)() - defer log.Scope(t).Close(t) - - ctx := context.Background() - settings := cluster.MakeTestingClusterSettingsWithVersions( - clusterversion.TestingBinaryMinSupportedVersion, - clusterversion.TestingBinaryMinSupportedVersion, - false, - ) - tc := testcluster.StartTestCluster(t, 1, base.TestClusterArgs{ - ServerArgs: base.TestServerArgs{ - Settings: settings, - Knobs: base.TestingKnobs{ - Store: &kvserver.StoreTestingKnobs{ - DisableMergeQueue: true, - }, - Server: &server.TestingKnobs{ - BinaryVersionOverride: clusterversion.TestingBinaryMinSupportedVersion, - DisableAutomaticVersionUpgrade: make(chan struct{}), - }, - }, - }, - }) - defer tc.Stopper().Stop(ctx) - require.NoError(t, tc.WaitForFullReplication()) - - db := tc.Server(0).DB() - - txA := db.NewTxn(ctx, "a") - txB := db.NewTxn(ctx, "b") - - require.NoError(t, txA.DeprecatedSetSystemConfigTrigger(true /* forSystemTenant */)) - db1000 := dbdesc.NewInitial(1000, "1000", username.AdminRoleName()) - require.NoError(t, txA.Put(ctx, - keys.SystemSQLCodec.DescMetadataKey(1000), - db1000.DescriptorProto())) - - require.NoError(t, txB.DeprecatedSetSystemConfigTrigger(true /* forSystemTenant */)) - db2000 := dbdesc.NewInitial(2000, "2000", username.AdminRoleName()) - require.NoError(t, txB.Put(ctx, - keys.SystemSQLCodec.DescMetadataKey(2000), - db2000.DescriptorProto())) - - const someTime = 10 * time.Millisecond - clearNotifictions := func(ch <-chan struct{}) { - for { - select { - case <-ch: - case <-time.After(someTime): - return - } - } - } - systemConfChangeCh := tc.Server(0).GossipI().(*gossip.Gossip).DeprecatedRegisterSystemConfigChannel() - clearNotifictions(systemConfChangeCh) - require.NoError(t, txB.Commit(ctx)) - select { - case <-systemConfChangeCh: - // This case is rare but happens sometimes. We gossip the node liveness - // in a bunch of cases so we just let the test finish here. The important - // thing is that sometimes we get to the next phase. - t.Log("got unexpected update. This can happen for a variety of " + - "reasons like lease transfers. 
The test is exiting without testing anything") - return - case <-time.After(someTime): - // Did not expect an update so this is the happy case - } - // Roll back the transaction which had laid down the intent which blocked the - // earlier gossip update, make sure we get a gossip notification now. - const aLongTime = 20 * someTime - require.NoError(t, txA.Rollback(ctx)) - select { - case <-systemConfChangeCh: - // Got an update. - case <-time.After(aLongTime): - t.Fatal("expected update") - } -} diff --git a/pkg/kv/kvserver/mvcc_gc_queue_test.go b/pkg/kv/kvserver/mvcc_gc_queue_test.go index 84c22e4d2251..9afde86d9d75 100644 --- a/pkg/kv/kvserver/mvcc_gc_queue_test.go +++ b/pkg/kv/kvserver/mvcc_gc_queue_test.go @@ -585,9 +585,9 @@ func TestMVCCGCQueueProcess(t *testing.T) { } } - cfg := tc.gossip.DeprecatedGetSystemConfig() - if cfg == nil { - t.Fatal("config not set") + cfg, err := tc.store.GetConfReader(ctx) + if err != nil { + t.Fatal(err) } // The total size of the GC'able versions of the keys and values in Info. @@ -870,9 +870,9 @@ func TestMVCCGCQueueTransactionTable(t *testing.T) { // Run GC. mgcq := newMVCCGCQueue(tc.store) - cfg := tc.gossip.DeprecatedGetSystemConfig() - if cfg == nil { - t.Fatal("config not set") + cfg, err := tc.store.GetConfReader(ctx) + if err != nil { + t.Fatal(err) } processed, err := mgcq.process(ctx, tc.repl, cfg) diff --git a/pkg/kv/kvserver/queue_test.go b/pkg/kv/kvserver/queue_test.go index acb3ff5da833..8ecc69133dc5 100644 --- a/pkg/kv/kvserver/queue_test.go +++ b/pkg/kv/kvserver/queue_test.go @@ -691,20 +691,21 @@ func TestAcceptsUnsplitRanges(t *testing.T) { bq.Start(stopper) // Check our config. - var sysCfg *config.SystemConfig + var cfg spanconfig.StoreReader testutils.SucceedsSoon(t, func() error { - sysCfg = s.cfg.Gossip.DeprecatedGetSystemConfig() - if sysCfg == nil { + cfg, err = bq.store.GetConfReader(ctx) + require.NoError(t, err) + if cfg == nil { return errors.New("system config not yet present") } return nil }) neverSplitsDesc := neverSplits.Desc() - if sysCfg.NeedsSplit(ctx, neverSplitsDesc.StartKey, neverSplitsDesc.EndKey) { + if cfg.NeedsSplit(ctx, neverSplitsDesc.StartKey, neverSplitsDesc.EndKey) { t.Fatal("System config says range needs to be split") } willSplitDesc := willSplit.Desc() - if sysCfg.NeedsSplit(ctx, willSplitDesc.StartKey, willSplitDesc.EndKey) { + if cfg.NeedsSplit(ctx, willSplitDesc.StartKey, willSplitDesc.EndKey) { t.Fatal("System config says range needs to be split") } @@ -736,11 +737,11 @@ func TestAcceptsUnsplitRanges(t *testing.T) { // Check our config. 
neverSplitsDesc = neverSplits.Desc() - if sysCfg.NeedsSplit(ctx, neverSplitsDesc.StartKey, neverSplitsDesc.EndKey) { + if cfg.NeedsSplit(ctx, neverSplitsDesc.StartKey, neverSplitsDesc.EndKey) { t.Fatal("System config says range needs to be split") } willSplitDesc = willSplit.Desc() - if !sysCfg.NeedsSplit(ctx, willSplitDesc.StartKey, willSplitDesc.EndKey) { + if !cfg.NeedsSplit(ctx, willSplitDesc.StartKey, willSplitDesc.EndKey) { t.Fatal("System config says range does not need to be split") } diff --git a/pkg/kv/kvserver/replica.go b/pkg/kv/kvserver/replica.go index 70344ec484e8..d38220cfb52e 100644 --- a/pkg/kv/kvserver/replica.go +++ b/pkg/kv/kvserver/replica.go @@ -2003,18 +2003,6 @@ func (r *Replica) GetExternalStorageFromURI( return r.store.cfg.ExternalStorageFromURI(ctx, uri, user) } -func (r *Replica) markSystemConfigGossipSuccess() { - r.mu.Lock() - defer r.mu.Unlock() - r.mu.failureToGossipSystemConfig = false -} - -func (r *Replica) markSystemConfigGossipFailed() { - r.mu.Lock() - defer r.mu.Unlock() - r.mu.failureToGossipSystemConfig = true -} - // GetResponseMemoryAccount implements the batcheval.EvalContext interface. func (r *Replica) GetResponseMemoryAccount() *mon.BoundAccount { // Return an empty account, which places no limits. Places where a real diff --git a/pkg/kv/kvserver/replica_gossip.go b/pkg/kv/kvserver/replica_gossip.go index a8a4da9e7757..476d70f492a5 100644 --- a/pkg/kv/kvserver/replica_gossip.go +++ b/pkg/kv/kvserver/replica_gossip.go @@ -13,7 +13,6 @@ package kvserver import ( "context" - "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/config" "github.com/cockroachdb/cockroach/pkg/gossip" "github.com/cockroachdb/cockroach/pkg/keys" @@ -65,91 +64,6 @@ func (r *Replica) shouldGossip(ctx context.Context) bool { return r.OwnsValidLease(ctx, r.store.Clock().NowAsClockTimestamp()) } -// MaybeGossipSystemConfigRaftMuLocked scans the entire SystemConfig span and -// gossips it. Further calls come from the trigger on EndTxn or range lease -// acquisition. -// -// Note that MaybeGossipSystemConfigRaftMuLocked gossips information only when -// the lease is actually held. The method does not request a range lease here -// since RequestLease and applyRaftCommand call the method and we need to avoid -// deadlocking in redirectOnOrAcquireLease. -// -// MaybeGossipSystemConfigRaftMuLocked must only be called from Raft commands -// while holding the raftMu (which provide the necessary serialization to avoid -// data races). -// -// TODO(nvanbenschoten,bdarnell): even though this is best effort, we should log -// louder when we continually fail to gossip system config. -// -// TODO(ajwerner): Remove this in 22.2. 
-func (r *Replica) MaybeGossipSystemConfigRaftMuLocked(ctx context.Context) error { - if r.ClusterSettings().Version.IsActive( - ctx, clusterversion.DisableSystemConfigGossipTrigger, - ) { - return nil - } - r.raftMu.AssertHeld() - if r.store.Gossip() == nil { - log.VEventf(ctx, 2, "not gossiping system config because gossip isn't initialized") - return nil - } - if !r.IsInitialized() { - log.VEventf(ctx, 2, "not gossiping system config because the replica isn't initialized") - return nil - } - if !r.ContainsKey(keys.SystemConfigSpan.Key) { - log.VEventf(ctx, 3, - "not gossiping system config because the replica doesn't contain the system config's start key") - return nil - } - if !r.shouldGossip(ctx) { - log.VEventf(ctx, 2, "not gossiping system config because the replica doesn't hold the lease") - return nil - } - - // TODO(marc): check for bad split in the middle of the SystemConfig span. - loadedCfg, err := r.loadSystemConfig(ctx) - if err != nil { - if errors.Is(err, errSystemConfigIntent) { - log.VEventf(ctx, 2, "not gossiping system config because intents were found on SystemConfigSpan") - r.markSystemConfigGossipFailed() - return nil - } - return errors.Wrap(err, "could not load SystemConfig span") - } - - if gossipedCfg := r.store.Gossip().DeprecatedGetSystemConfig(); gossipedCfg != nil && - gossipedCfg.Equal(loadedCfg) && - r.store.Gossip().InfoOriginatedHere(gossip.KeyDeprecatedSystemConfig) { - log.VEventf(ctx, 2, "not gossiping unchanged system config") - // Clear the failure bit if all intents have been resolved but there's - // nothing new to gossip. - r.markSystemConfigGossipSuccess() - return nil - } - - log.VEventf(ctx, 2, "gossiping system config") - if err := r.store.Gossip().AddInfoProto(gossip.KeyDeprecatedSystemConfig, loadedCfg, 0); err != nil { - return errors.Wrap(err, "failed to gossip system config") - } - r.markSystemConfigGossipSuccess() - return nil -} - -// MaybeGossipSystemConfigIfHaveFailureRaftMuLocked is a trigger to gossip the -// system config due to an abort of a transaction keyed in the system config -// span. It will call MaybeGossipSystemConfigRaftMuLocked if -// failureToGossipSystemConfig is true. -func (r *Replica) MaybeGossipSystemConfigIfHaveFailureRaftMuLocked(ctx context.Context) error { - r.mu.RLock() - failed := r.mu.failureToGossipSystemConfig - r.mu.RUnlock() - if !failed { - return nil - } - return r.MaybeGossipSystemConfigRaftMuLocked(ctx) -} - // MaybeGossipNodeLivenessRaftMuLocked gossips information for all node liveness // records stored on this range. To scan and gossip, this replica must hold the // lease to a range which contains some or all of the node liveness records. diff --git a/pkg/kv/kvserver/replica_proposal.go b/pkg/kv/kvserver/replica_proposal.go index 47088aea8345..b52ec9fd2d2c 100644 --- a/pkg/kv/kvserver/replica_proposal.go +++ b/pkg/kv/kvserver/replica_proposal.go @@ -401,9 +401,6 @@ func (r *Replica) leasePostApplyLocked( // Nothing to do. return } - if err := r.MaybeGossipSystemConfigRaftMuLocked(ctx); err != nil { - log.Errorf(ctx, "%v", err) - } if err := r.MaybeGossipNodeLivenessRaftMuLocked(ctx, keys.NodeLivenessSpan); err != nil { log.Errorf(ctx, "%v", err) } @@ -596,26 +593,6 @@ func (r *Replica) handleReadWriteLocalEvalResult(ctx context.Context, lResult re lResult.MaybeAddToSplitQueue = false } - // The gossip triggers below require raftMu to be held, but - // handleReadWriteLocalEvalResult() may be called from non-Raft code paths (in - // particular for noop proposals). 
LocalResult.RequiresRaft() will force - // results that set these gossip triggers to always go via Raft such that - // raftMu is held. The triggers assert that callers hold the mutex during race - // tests via raftMu.AssertHeld(). - if lResult.MaybeGossipSystemConfig { - if err := r.MaybeGossipSystemConfigRaftMuLocked(ctx); err != nil { - log.Errorf(ctx, "%v", err) - } - lResult.MaybeGossipSystemConfig = false - } - - if lResult.MaybeGossipSystemConfigIfHaveFailure { - if err := r.MaybeGossipSystemConfigIfHaveFailureRaftMuLocked(ctx); err != nil { - log.Errorf(ctx, "%v", err) - } - lResult.MaybeGossipSystemConfigIfHaveFailure = false - } - if lResult.MaybeGossipNodeLiveness != nil { if err := r.MaybeGossipNodeLivenessRaftMuLocked(ctx, *lResult.MaybeGossipNodeLiveness); err != nil { log.Errorf(ctx, "%v", err) diff --git a/pkg/kv/kvserver/replica_test.go b/pkg/kv/kvserver/replica_test.go index 00f5287fd3f3..9a069683fc59 100644 --- a/pkg/kv/kvserver/replica_test.go +++ b/pkg/kv/kvserver/replica_test.go @@ -30,7 +30,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/cli/exit" "github.com/cockroachdb/cockroach/pkg/clusterversion" - "github.com/cockroachdb/cockroach/pkg/config" "github.com/cockroachdb/cockroach/pkg/gossip" "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" @@ -218,8 +217,6 @@ func (tc *testContext) StartWithStoreConfigAndVersion( tc.transport = store.cfg.Transport tc.engine = store.engine tc.store = store - // TODO(tbg): see if this is needed. Would like to remove it. - require.NoError(t, tc.initConfigs(t)) } func (tc *testContext) Sender() kv.Sender { @@ -252,27 +249,6 @@ func (tc *testContext) SendWrapped(args roachpb.Request) (roachpb.Response, *roa return tc.SendWrappedWith(roachpb.Header{}, args) } -// initConfigs creates default configuration entries. -// -// TODO(ajwerner): Remove this in 22.2. -func (tc *testContext) initConfigs(t testing.TB) error { - // Put an empty system config into gossip so that gossip callbacks get - // run. We're using a fake config, but it's hooked into SystemConfig. - if err := tc.gossip.AddInfoProto(gossip.KeyDeprecatedSystemConfig, - &config.SystemConfigEntries{}, 0); err != nil { - return err - } - - testutils.SucceedsSoon(t, func() error { - if cfg := tc.gossip.DeprecatedGetSystemConfig(); cfg == nil { - return errors.Errorf("expected system config to be set") - } - return nil - }) - - return nil -} - // addBogusReplicaToRangeDesc modifies the range descriptor to include a second // replica. This is useful for tests that want to pretend they're transferring // the range lease away, as the lease can only be obtained by Replicas which are @@ -1195,14 +1171,6 @@ func TestReplicaGossipConfigsOnLease(t *testing.T) { t.Fatal(err) } - // If this actually failed, we would have gossiped from MVCCPutProto. - // Unlikely, but why not check. - if cfg := tc.gossip.DeprecatedGetSystemConfig(); cfg != nil { - if nv := len(cfg.Values); nv == 1 && cfg.Values[nv-1].Key.Equal(key) { - t.Errorf("unexpected gossip of system config: %s", cfg) - } - } - // Expire our own lease which we automagically acquired due to being // first range and config holder. 
tc.manualClock.Set(leaseExpiry(tc.repl)) @@ -1221,12 +1189,6 @@ func TestReplicaGossipConfigsOnLease(t *testing.T) { tc.manualClock.Increment(11 + int64(tc.Clock().MaxOffset())) // advance time now = tc.Clock().NowAsClockTimestamp() - ch := tc.gossip.DeprecatedRegisterSystemConfigChannel() - select { - case <-ch: - default: - } - // Give lease to this range. if err := sendLeaseRequest(tc.repl, &roachpb.Lease{ Start: now.ToTimestamp().Add(11, 0).UnsafeToClockTimestamp(), @@ -1239,24 +1201,6 @@ func TestReplicaGossipConfigsOnLease(t *testing.T) { }); err != nil { t.Fatal(err) } - - testutils.SucceedsSoon(t, func() error { - sysCfg := tc.gossip.DeprecatedGetSystemConfig() - if sysCfg == nil { - return errors.Errorf("no system config yet") - } - var found bool - for _, cur := range sysCfg.Values { - if key.Equal(cur.Key) { - found = true - break - } - } - if !found { - return errors.Errorf("key %s not found in SystemConfig", key) - } - return nil - }) } // TestReplicaTSCacheLowWaterOnLease verifies that the low water mark @@ -1554,20 +1498,6 @@ func TestReplicaGossipFirstRange(t *testing.T) { } } -// TestReplicaGossipAllConfigs verifies that all config types are gossiped. -func TestReplicaGossipAllConfigs(t *testing.T) { - defer leaktest.AfterTest(t)() - defer log.Scope(t).Close(t) - ctx := context.Background() - tc := testContext{} - stopper := stop.NewStopper() - defer stopper.Stop(ctx) - tc.Start(ctx, t, stopper) - if cfg := tc.gossip.DeprecatedGetSystemConfig(); cfg == nil { - t.Fatal("config not set") - } -} - func getArgs(key []byte) roachpb.GetRequest { return roachpb.GetRequest{ RequestHeader: roachpb.RequestHeader{ @@ -10426,10 +10356,13 @@ func TestConsistenctQueueErrorFromCheckConsistency(t *testing.T) { tc := testContext{} tc.StartWithStoreConfig(ctx, t, stopper, cfg) + confReader, err := tc.store.GetConfReader(ctx) + if err != nil { + t.Fatal(err) + } for i := 0; i < 2; i++ { // Do this twice because it used to deadlock. See #25456. - sysCfg := tc.store.Gossip().DeprecatedGetSystemConfig() - processed, err := tc.store.consistencyQueue.process(ctx, tc.repl, sysCfg) + processed, err := tc.store.consistencyQueue.process(ctx, tc.repl, confReader) if !testutils.IsError(err, "boom") { t.Fatal(err) } diff --git a/pkg/kv/kvserver/split_queue_test.go b/pkg/kv/kvserver/split_queue_test.go index 9e75db36e0a6..27111c5ecaad 100644 --- a/pkg/kv/kvserver/split_queue_test.go +++ b/pkg/kv/kvserver/split_queue_test.go @@ -70,9 +70,9 @@ func TestSplitQueueShouldQueue(t *testing.T) { {roachpb.RKey(keys.SystemSQLCodec.TablePrefix(2001)), roachpb.RKeyMax, 32<<20 + 1, 64 << 20, true, 1}, } - cfg := tc.gossip.DeprecatedGetSystemConfig() - if cfg == nil { - t.Fatal("config not set") + cfg, err := tc.store.GetConfReader(ctx) + if err != nil { + t.Fatal(err) } for i, test := range testCases { // Create a replica for testing that is not hooked up to the store. This diff --git a/pkg/kv/kvserver/store.go b/pkg/kv/kvserver/store.go index 581b8a95fa18..71f8d9068d49 100644 --- a/pkg/kv/kvserver/store.go +++ b/pkg/kv/kvserver/store.go @@ -1068,6 +1068,7 @@ type StoreConfig struct { // SpanConfigsDisabled determines whether we're able to use the span configs // infrastructure or not. + // TODO(richardjcai): We can likely remove this. SpanConfigsDisabled bool // Used to subscribe to span configuration changes, keeping up-to-date a // data structure useful for retrieving span configs. 
Only available if @@ -2193,7 +2194,6 @@ func (s *Store) GetConfReader(ctx context.Context) (spanconfig.StoreReader, erro if s.cfg.SpanConfigsDisabled || !spanconfigstore.EnabledSetting.Get(&s.ClusterSettings().SV) || - !s.cfg.Settings.Version.IsActive(ctx, clusterversion.EnableSpanConfigStore) || s.TestingKnobs().UseSystemConfigSpanForQueues { sysCfg := s.cfg.SystemConfigProvider.GetSystemConfig() diff --git a/pkg/kv/mock_transactional_sender.go b/pkg/kv/mock_transactional_sender.go index 0af00a2da39c..d807546ad2a8 100644 --- a/pkg/kv/mock_transactional_sender.go +++ b/pkg/kv/mock_transactional_sender.go @@ -16,7 +16,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/storage/enginepb" "github.com/cockroachdb/cockroach/pkg/util/hlc" - "github.com/cockroachdb/errors" ) // MockTransactionalSender allows a function to be used as a TxnSender. @@ -66,11 +65,6 @@ func (m *MockTransactionalSender) UpdateRootWithLeafFinalState( panic("unimplemented") } -// AnchorOnSystemConfigRange is part of the TxnSender interface. -func (m *MockTransactionalSender) AnchorOnSystemConfigRange() error { - return errors.New("unimplemented") -} - // TxnStatus is part of the TxnSender interface. func (m *MockTransactionalSender) TxnStatus() roachpb.TransactionStatus { return m.txn.Status diff --git a/pkg/kv/sender.go b/pkg/kv/sender.go index 71fc61d7590c..25450f472284 100644 --- a/pkg/kv/sender.go +++ b/pkg/kv/sender.go @@ -96,17 +96,6 @@ type Sender interface { type TxnSender interface { Sender - // AnchorOnSystemConfigRange ensures that the transaction record, - // if/when it will be created, will be created on the system config - // range. This is useful because some commit triggers only work when - // the EndTxn is evaluated on that range. - // - // An error is returned if the transaction's key has already been - // set by anything other than a previous call to this function - // (i.e. if the transaction already performed any writes). - // It is allowed to call this method multiple times. - AnchorOnSystemConfigRange() error - // GetLeafTxnInputState retrieves the input state necessary and // sufficient to initialize a LeafTxn from the current RootTxn. // diff --git a/pkg/kv/txn.go b/pkg/kv/txn.go index 70282aaffc1b..c6c6a5125032 100644 --- a/pkg/kv/txn.go +++ b/pkg/kv/txn.go @@ -63,9 +63,6 @@ type Txn struct { // commitTriggers are run upon successful commit. commitTriggers []func(ctx context.Context) - // systemConfigTrigger is set to true when modifying keys from the SystemConfig - // span. This sets the SystemConfigTrigger on EndTxnRequest. - systemConfigTrigger bool // mu holds fields that need to be synchronized for concurrent request execution. mu struct { @@ -402,30 +399,6 @@ func (txn *Txn) RequiredFrontier() hlc.Timestamp { return txn.mu.sender.RequiredFrontier() } -// DeprecatedSetSystemConfigTrigger sets the system db trigger to true on this transaction. -// This will impact the EndTxnRequest. Note that this method takes a boolean -// argument indicating whether this transaction is intended for the system -// tenant. Only transactions for the system tenant need to set the system config -// trigger which is used to gossip updates to the system config to KV servers. -// The KV servers need access to an up-to-date system config in order to -// determine split points and zone configurations. 
-func (txn *Txn) DeprecatedSetSystemConfigTrigger(forSystemTenant bool) error { - if txn.typ != RootTxn { - return errors.AssertionFailedf("DeprecatedSetSystemConfigTrigger() called on leaf txn") - } - if !forSystemTenant { - return nil - } - - txn.mu.Lock() - defer txn.mu.Unlock() - if err := txn.mu.sender.AnchorOnSystemConfigRange(); err != nil { - return err - } - txn.systemConfigTrigger = true - return nil -} - // DisablePipelining instructs the transaction not to pipeline requests. It // should rarely be necessary to call this method. // @@ -701,7 +674,7 @@ func (txn *Txn) commit(ctx context.Context) error { // to reduce contention by releasing locks. In multi-tenant settings, it // will be subject to admission control, and the zero CreateTime will give // it preference within the tenant. - et := endTxnReq(true /* commit */, txn.deadline(), txn.systemConfigTrigger) + et := endTxnReq(true, txn.deadline()) ba := roachpb.BatchRequest{Requests: et.unionArr[:]} _, pErr := txn.Send(ctx, ba) if pErr == nil { @@ -757,7 +730,7 @@ func (txn *Txn) CommitInBatch(ctx context.Context, b *Batch) error { if txn != b.txn { return errors.Errorf("a batch b can only be committed by b.txn") } - et := endTxnReq(true /* commit */, txn.deadline(), txn.systemConfigTrigger) + et := endTxnReq(true, txn.deadline()) b.growReqs(1) b.reqs[len(b.reqs)-1].Value = &et.union b.initResult(1 /* calls */, 0, b.raw, nil) @@ -877,7 +850,7 @@ func (txn *Txn) rollback(ctx context.Context) *roachpb.Error { // order to reduce contention by releasing locks. In multi-tenant // settings, it will be subject to admission control, and the zero // CreateTime will give it preference within the tenant. - et := endTxnReq(false /* commit */, nil /* deadline */, false /* systemConfigTrigger */) + et := endTxnReq(false, nil /* deadline */) ba := roachpb.BatchRequest{Requests: et.unionArr[:]} _, pErr := txn.Send(ctx, ba) if pErr == nil { @@ -903,7 +876,7 @@ func (txn *Txn) rollback(ctx context.Context) *roachpb.Error { // order to reduce contention by releasing locks. In multi-tenant // settings, it will be subject to admission control, and the zero // CreateTime will give it preference within the tenant. - et := endTxnReq(false /* commit */, nil /* deadline */, false /* systemConfigTrigger */) + et := endTxnReq(false, nil /* deadline */) ba := roachpb.BatchRequest{Requests: et.unionArr[:]} _ = contextutil.RunWithTimeout(ctx, "async txn rollback", asyncRollbackTimeout, func(ctx context.Context) error { @@ -945,17 +918,10 @@ type endTxnReqAlloc struct { unionArr [1]roachpb.RequestUnion } -func endTxnReq(commit bool, deadline *hlc.Timestamp, hasTrigger bool) *endTxnReqAlloc { +func endTxnReq(commit bool, deadline *hlc.Timestamp) *endTxnReqAlloc { alloc := new(endTxnReqAlloc) alloc.req.Commit = commit alloc.req.Deadline = deadline - if hasTrigger { - alloc.req.InternalCommitTrigger = &roachpb.InternalCommitTrigger{ - ModifiedSpanTrigger: &roachpb.ModifiedSpanTrigger{ - SystemConfigSpan: true, - }, - } - } alloc.union.EndTxn = &alloc.req alloc.unionArr[0].Value = &alloc.union return alloc diff --git a/pkg/kv/txn_test.go b/pkg/kv/txn_test.go index 8c2470a5bfbd..d55805698d1b 100644 --- a/pkg/kv/txn_test.go +++ b/pkg/kv/txn_test.go @@ -538,27 +538,6 @@ func TestUpdateDeadlineMaybe(t *testing.T) { } } -// Test that, if DeprecatedSetSystemConfigTrigger() fails, the systemConfigTrigger has not -// been set. 
-func TestAnchoringErrorNoTrigger(t *testing.T) { - defer leaktest.AfterTest(t)() - defer log.Scope(t).Close(t) - ctx := context.Background() - stopper := stop.NewStopper() - defer stopper.Stop(ctx) - - mc := hlc.NewManualClock(1) - clock := hlc.NewClock(mc, time.Nanosecond /* maxOffset */) - db := NewDB(log.MakeTestingAmbientCtxWithNewTracer(), MakeMockTxnSenderFactory( - func(context.Context, *roachpb.Transaction, roachpb.BatchRequest, - ) (*roachpb.BatchResponse, *roachpb.Error) { - return nil, nil - }), clock, stopper) - txn := NewTxn(ctx, db, 0 /* gatewayNodeID */) - require.EqualError(t, txn.DeprecatedSetSystemConfigTrigger(true /* forSystemTenant */), "unimplemented") - require.False(t, txn.systemConfigTrigger) -} - // TestTxnNegotiateAndSend tests the behavior of NegotiateAndSend, both when the // server-side fast path is possible (for single-range reads) and when it is not // (for cross-range reads). diff --git a/pkg/roachpb/data.go b/pkg/roachpb/data.go index 35f1c56de3d2..39c964412ced 100644 --- a/pkg/roachpb/data.go +++ b/pkg/roachpb/data.go @@ -855,8 +855,6 @@ func (ct InternalCommitTrigger) Kind() redact.SafeString { return "change-replicas" case ct.ModifiedSpanTrigger != nil: switch { - case ct.ModifiedSpanTrigger.SystemConfigSpan: - return "modified-span (system-config)" case ct.ModifiedSpanTrigger.NodeLivenessSpan != nil: return "modified-span (node-liveness)" default: diff --git a/pkg/roachpb/data.proto b/pkg/roachpb/data.proto index 46d84c2acf74..060a73be797d 100644 --- a/pkg/roachpb/data.proto +++ b/pkg/roachpb/data.proto @@ -249,7 +249,7 @@ message ChangeReplicasTrigger { // ModifiedSpanTrigger indicates that a specific span has been modified. // This can be used to trigger scan-and-gossip for the given span. message ModifiedSpanTrigger { - bool system_config_span = 1; + reserved 1; // node_liveness_span is set to indicate that node liveness records // need re-gossiping after modification or range lease updates. 
The // span is set to a single key when nodes update their liveness records diff --git a/pkg/server/BUILD.bazel b/pkg/server/BUILD.bazel index dc5157f6d345..2bf0e54e7fa9 100644 --- a/pkg/server/BUILD.bazel +++ b/pkg/server/BUILD.bazel @@ -400,8 +400,6 @@ go_test( "//pkg/settings/cluster", "//pkg/spanconfig", "//pkg/sql", - "//pkg/sql/catalog/catalogkeys", - "//pkg/sql/catalog/dbdesc", "//pkg/sql/catalog/descpb", "//pkg/sql/execinfrapb", "//pkg/sql/idxusage", diff --git a/pkg/server/server_test.go b/pkg/server/server_test.go index 8eb10126bada..01c6c4b2a101 100644 --- a/pkg/server/server_test.go +++ b/pkg/server/server_test.go @@ -11,7 +11,6 @@ package server import ( - "bytes" "compress/gzip" "context" "fmt" @@ -32,22 +31,15 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/build" "github.com/cockroachdb/cockroach/pkg/cli/exit" - "github.com/cockroachdb/cockroach/pkg/clusterversion" - "github.com/cockroachdb/cockroach/pkg/config" "github.com/cockroachdb/cockroach/pkg/config/zonepb" "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/liveness/livenesspb" "github.com/cockroachdb/cockroach/pkg/roachpb" - "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/server/serverpb" "github.com/cockroachdb/cockroach/pkg/server/status/statuspb" - "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkeys" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/dbdesc" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/storage" "github.com/cockroachdb/cockroach/pkg/testutils" @@ -396,109 +388,6 @@ func TestAcceptEncoding(t *testing.T) { } } -// TestSystemConfigGossip tests that system config gossip works in the mixed -// version state. After the 22.1 release is finalized, system config gossip -// will no longer occur. -// -// TODO(ajwerner): Delete this test in 22.2. -func TestSystemConfigGossip(t *testing.T) { - defer leaktest.AfterTest(t)() - defer log.Scope(t).Close(t) - - ctx := context.Background() - settings := cluster.MakeTestingClusterSettingsWithVersions( - clusterversion.TestingBinaryMinSupportedVersion, - clusterversion.TestingBinaryMinSupportedVersion, - false, - ) - serverArgs := base.TestServerArgs{ - Settings: settings, - Knobs: base.TestingKnobs{ - Store: &kvserver.StoreTestingKnobs{ - DisableMergeQueue: true, - }, - Server: &TestingKnobs{ - BinaryVersionOverride: clusterversion.TestingBinaryMinSupportedVersion, - DisableAutomaticVersionUpgrade: make(chan struct{}), - }, - }, - } - s, _, kvDB := serverutils.StartServer(t, serverArgs) - defer s.Stopper().Stop(ctx) - ts := s.(*TestServer) - - key := catalogkeys.MakeDescMetadataKey(keys.SystemSQLCodec, descpb.ID(keys.MaxSystemConfigDescID+1)) - valAt := func(i int) *descpb.Descriptor { - return dbdesc.NewInitial( - descpb.ID(i), "foo", username.AdminRoleName(), - ).DescriptorProto() - } - - // Register a callback for gossip updates. - resultChan := ts.Gossip().DeprecatedRegisterSystemConfigChannel() - - // The span gets gossiped when it first shows up. 
- select { - case <-resultChan: - - case <-time.After(500 * time.Millisecond): - t.Fatal("did not receive gossip message") - } - - // Write a system key with the transaction marked as having a Gossip trigger. - if err := kvDB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - if err := txn.DeprecatedSetSystemConfigTrigger(true /* forSystemTenant */); err != nil { - return err - } - return txn.Put(ctx, key, valAt(2)) - }); err != nil { - t.Fatal(err) - } - - // This has to be wrapped in a SucceedSoon because system upgrades on the - // testserver's startup can trigger system config updates without the key we - // wrote. - testutils.SucceedsSoon(t, func() error { - // New system config received. - var systemConfig *config.SystemConfig - select { - case <-resultChan: - systemConfig = ts.gossip.DeprecatedGetSystemConfig() - - case <-time.After(500 * time.Millisecond): - return errors.Errorf("did not receive gossip message") - } - - // Now check the new config. - var val *roachpb.Value - for _, kv := range systemConfig.Values { - if bytes.Equal(key, kv.Key) { - val = &kv.Value - break - } - } - if val == nil { - return errors.Errorf("key not found in gossiped info") - } - - // Make sure the returned value is valAt(2). - var got descpb.Descriptor - if err := val.GetProto(&got); err != nil { - return err - } - - _, expected, _, _ := descpb.FromDescriptor(valAt(2)) - _, db, _, _ := descpb.FromDescriptor(&got) - if db == nil { - panic(errors.Errorf("found nil database: %v", got)) - } - if !reflect.DeepEqual(*db, *expected) { - panic(errors.Errorf("mismatch: expected %+v, got %+v", *expected, *db)) - } - return nil - }) -} - func TestListenerFileCreation(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) diff --git a/pkg/spanconfig/spanconfigstore/store.go b/pkg/spanconfig/spanconfigstore/store.go index 02552a9b1450..6beff543e175 100644 --- a/pkg/spanconfig/spanconfigstore/store.go +++ b/pkg/spanconfig/spanconfigstore/store.go @@ -28,6 +28,7 @@ import ( // using the gossip backed system config span to instead using the span configs // infrastructure. It has no effect if COCKROACH_DISABLE_SPAN_CONFIGS // is set. +// TODO(richardjcai): We can likely remove this. 
var EnabledSetting = settings.RegisterBoolSetting( settings.SystemOnly, "spanconfig.store.enabled", diff --git a/pkg/sql/catalog/descs/txn.go b/pkg/sql/catalog/descs/txn.go index 80717a3e6c0c..6226a8c679c1 100644 --- a/pkg/sql/catalog/descs/txn.go +++ b/pkg/sql/catalog/descs/txn.go @@ -16,7 +16,6 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/spanconfig" "github.com/cockroachdb/cockroach/pkg/sql/catalog" @@ -82,15 +81,6 @@ func (cf *CollectionFactory) Txn( deletedDescs = catalog.DescriptorIDSet{} descsCol = cf.MakeCollection(ctx, nil /* temporarySchemaProvider */, nil /* monitor */) defer descsCol.ReleaseAll(ctx) - if !cf.settings.Version.IsActive( - ctx, clusterversion.DisableSystemConfigGossipTrigger, - ) { - if err := txn.DeprecatedSetSystemConfigTrigger( - cf.leaseMgr.Codec().ForSystemTenant(), - ); err != nil { - return err - } - } if err := f(ctx, txn, &descsCol); err != nil { return err } diff --git a/pkg/sql/gcjob/BUILD.bazel b/pkg/sql/gcjob/BUILD.bazel index 13c143c4a819..0d5ce27d9dd9 100644 --- a/pkg/sql/gcjob/BUILD.bazel +++ b/pkg/sql/gcjob/BUILD.bazel @@ -15,7 +15,6 @@ go_library( importpath = "github.com/cockroachdb/cockroach/pkg/sql/gcjob", visibility = ["//visibility:public"], deps = [ - "//pkg/clusterversion", "//pkg/config", "//pkg/config/zonepb", "//pkg/jobs", diff --git a/pkg/sql/gcjob/descriptor_utils.go b/pkg/sql/gcjob/descriptor_utils.go index 63dc4171f798..9b4f5d303bb0 100644 --- a/pkg/sql/gcjob/descriptor_utils.go +++ b/pkg/sql/gcjob/descriptor_utils.go @@ -13,7 +13,6 @@ package gcjob import ( "context" - "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/config" "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" @@ -75,13 +74,6 @@ func deleteDatabaseZoneConfig( return nil } return db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - if !settings.Version.IsActive( - ctx, clusterversion.DisableSystemConfigGossipTrigger, - ) { - if err := txn.DeprecatedSetSystemConfigTrigger(codec.ForSystemTenant()); err != nil { - return err - } - } b := &kv.Batch{} // Delete the zone config entry for the dropped database associated with the diff --git a/pkg/sql/opt/exec/execbuilder/BUILD.bazel b/pkg/sql/opt/exec/execbuilder/BUILD.bazel index 53a095e28194..6db700a483ce 100644 --- a/pkg/sql/opt/exec/execbuilder/BUILD.bazel +++ b/pkg/sql/opt/exec/execbuilder/BUILD.bazel @@ -14,7 +14,6 @@ go_library( importpath = "github.com/cockroachdb/cockroach/pkg/sql/opt/exec/execbuilder", visibility = ["//visibility:public"], deps = [ - "//pkg/clusterversion", "//pkg/server/telemetry", "//pkg/sql/catalog/colinfo", "//pkg/sql/catalog/descpb", diff --git a/pkg/sql/opt/exec/execbuilder/relational.go b/pkg/sql/opt/exec/execbuilder/relational.go index a8503b9354cb..aaae2d467235 100644 --- a/pkg/sql/opt/exec/execbuilder/relational.go +++ b/pkg/sql/opt/exec/execbuilder/relational.go @@ -15,7 +15,6 @@ import ( "context" "fmt" - "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/server/telemetry" "github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" @@ -153,23 +152,6 @@ func (b *Builder) buildRelational(e memo.RelExpr) (execPlan, error) { // Mark the statement as containing DDL for use // in the SQL executor. 
b.IsDDL = true - - // This will set the system DB trigger for transactions containing - // schema-modifying statements that have no effect, such as - // `BEGIN; INSERT INTO ...; CREATE TABLE IF NOT EXISTS ...; COMMIT;` - // where the table already exists. This will generate some false schema - // cache refreshes, but that's expected to be quite rare in practice. - if !b.evalCtx.Settings.Version.IsActive( - b.evalCtx.Ctx(), clusterversion.DisableSystemConfigGossipTrigger, - ) { - if err := b.evalCtx.Txn.DeprecatedSetSystemConfigTrigger(b.evalCtx.Codec.ForSystemTenant()); err != nil { - return execPlan{}, errors.WithSecondaryError( - unimplemented.NewWithIssuef(26508, - "the first schema change statement in a transaction must precede any writes"), - err) - } - } - } if opt.IsMutationOp(e) { diff --git a/pkg/sql/opt_exec_factory.go b/pkg/sql/opt_exec_factory.go index 938312ff1945..d75389b0c010 100644 --- a/pkg/sql/opt_exec_factory.go +++ b/pkg/sql/opt_exec_factory.go @@ -1293,10 +1293,6 @@ func (ef *execFactory) ConstructInsert( tabDesc := table.(*optTable).desc cols := makeColList(table, insertColOrdSet) - if err := ef.planner.maybeSetSystemConfig(tabDesc.GetID()); err != nil { - return nil, err - } - // Create the table inserter, which does the bulk of the work. internal := ef.planner.SessionData().Internal ri, err := row.MakeInserter( @@ -1368,10 +1364,6 @@ func (ef *execFactory) ConstructInsertFastPath( tabDesc := table.(*optTable).desc cols := makeColList(table, insertColOrdSet) - if err := ef.planner.maybeSetSystemConfig(tabDesc.GetID()); err != nil { - return nil, err - } - // Create the table inserter, which does the bulk of the work. internal := ef.planner.SessionData().Internal ri, err := row.MakeInserter( @@ -1465,10 +1457,6 @@ func (ef *execFactory) ConstructUpdate( tabDesc := table.(*optTable).desc fetchCols := makeColList(table, fetchColOrdSet) - if err := ef.planner.maybeSetSystemConfig(tabDesc.GetID()); err != nil { - return nil, err - } - // Add each column to update as a sourceSlot. The CBO only uses scalarSlot, // since it compiles tuples and subqueries into a simple sequence of target // columns. @@ -1580,10 +1568,6 @@ func (ef *execFactory) ConstructUpsert( fetchCols := makeColList(table, fetchColOrdSet) updateCols := makeColList(table, updateColOrdSet) - if err := ef.planner.maybeSetSystemConfig(tabDesc.GetID()); err != nil { - return nil, err - } - // Create the table inserter, which does the bulk of the insert-related work. internal := ef.planner.SessionData().Internal ri, err := row.MakeInserter( @@ -1677,10 +1661,6 @@ func (ef *execFactory) ConstructDelete( tabDesc := table.(*optTable).desc fetchCols := makeColList(table, fetchColOrdSet) - if err := ef.planner.maybeSetSystemConfig(tabDesc.GetID()); err != nil { - return nil, err - } - // Create the table deleter, which does the bulk of the work. In the HP, // the deleter derives the columns that need to be fetched. 
By contrast, the // CBO will have already determined the set of fetch columns, and passes @@ -1742,10 +1722,6 @@ func (ef *execFactory) ConstructDeleteRange( var sb span.Builder sb.Init(ef.planner.EvalContext(), ef.planner.ExecCfg().Codec, tabDesc, tabDesc.GetPrimaryIndex()) - if err := ef.planner.maybeSetSystemConfig(tabDesc.GetID()); err != nil { - return nil, err - } - spans, err := sb.SpansFromConstraint(indexConstraint, span.NoopSplitter()) if err != nil { return nil, err diff --git a/pkg/sql/plan.go b/pkg/sql/plan.go index cab4d1b19723..3b35c4a260f2 100644 --- a/pkg/sql/plan.go +++ b/pkg/sql/plan.go @@ -13,10 +13,8 @@ package sql import ( "context" - "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" "github.com/cockroachdb/cockroach/pkg/sql/execstats" "github.com/cockroachdb/cockroach/pkg/sql/opt/exec" @@ -539,19 +537,6 @@ func (p *planner) maybePlanHook(ctx context.Context, stmt tree.Statement) (planN return nil, nil } -// Mark transaction as operating on the system DB if the descriptor id -// is within the SystemConfig range. -func (p *planner) maybeSetSystemConfig(id descpb.ID) error { - if !descpb.IsSystemConfigID(id) || p.execCfg.Settings.Version.IsActive( - p.EvalContext().Ctx(), clusterversion.DisableSystemConfigGossipTrigger, - ) { - return nil - } - // Mark transaction as operating on the system DB. - // Only the system tenant marks the SystemConfigTrigger. - return p.txn.DeprecatedSetSystemConfigTrigger(p.execCfg.Codec.ForSystemTenant()) -} - // planFlags is used throughout the planning code to keep track of various // events or decisions along the way. type planFlags uint32 diff --git a/pkg/sql/schema_changer.go b/pkg/sql/schema_changer.go index cb485b0d0e02..81b6a32c58a0 100644 --- a/pkg/sql/schema_changer.go +++ b/pkg/sql/schema_changer.go @@ -3051,13 +3051,6 @@ func DeleteTableDescAndZoneConfig( ) error { log.Infof(ctx, "removing table descriptor and zone config for table %d", tableDesc.GetID()) return db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - if !settings.Version.IsActive( - ctx, clusterversion.DisableSystemConfigGossipTrigger, - ) { - if err := txn.DeprecatedSetSystemConfigTrigger(codec.ForSystemTenant()); err != nil { - return err - } - } b := &kv.Batch{} // Delete the descriptor. diff --git a/pkg/sql/tests/BUILD.bazel b/pkg/sql/tests/BUILD.bazel index 7b08df9099a5..0148051f2559 100644 --- a/pkg/sql/tests/BUILD.bazel +++ b/pkg/sql/tests/BUILD.bazel @@ -5,15 +5,12 @@ go_library( srcs = [ "command_filters.go", "data.go", - "end_txn_trigger.go", "server_params.go", ], importpath = "github.com/cockroachdb/cockroach/pkg/sql/tests", visibility = ["//visibility:public"], deps = [ "//pkg/base", - "//pkg/clusterversion", - "//pkg/keys", "//pkg/kv", "//pkg/kv/kvserver", "//pkg/kv/kvserver/kvserverbase", diff --git a/pkg/sql/tests/end_txn_trigger.go b/pkg/sql/tests/end_txn_trigger.go deleted file mode 100644 index f2bfb66f6859..000000000000 --- a/pkg/sql/tests/end_txn_trigger.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2017 The Cockroach Authors. -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. 
-// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package tests - -import ( - "bytes" - - "github.com/cockroachdb/cockroach/pkg/clusterversion" - "github.com/cockroachdb/cockroach/pkg/keys" - "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverbase" - "github.com/cockroachdb/cockroach/pkg/roachpb" - "github.com/cockroachdb/errors" -) - -// CheckEndTxnTrigger verifies that an EndTxnRequest that includes intents for -// the SystemDB keys sets the proper trigger. -// -// TODO(ajwerner): Remove this in 22.2. It only applies to the mixed-version -// state. -func CheckEndTxnTrigger(args kvserverbase.FilterArgs) *roachpb.Error { - req, ok := args.Req.(*roachpb.EndTxnRequest) - if !ok { - return nil - } - - if !req.Commit { - // This is a rollback: skip trigger verification. - return nil - } - - modifiedSpanTrigger := req.InternalCommitTrigger.GetModifiedSpanTrigger() - modifiedSystemConfigSpan := modifiedSpanTrigger != nil && modifiedSpanTrigger.SystemConfigSpan - - var hasSystemKey bool - for _, span := range req.LockSpans { - if bytes.Compare(span.Key, keys.SystemConfigSpan.Key) >= 0 && - bytes.Compare(span.Key, keys.SystemConfigSpan.EndKey) < 0 { - hasSystemKey = true - break - } - } - // If the transaction in question has intents in the system span, then - // modifiedSystemConfigSpan should always be true. However, it is possible - // for modifiedSystemConfigSpan to be set, even though no system keys are - // present. This can occur with certain conditional DDL statements (e.g. - // "CREATE TABLE IF NOT EXISTS"), which set the SystemConfigTrigger - // aggressively but may not actually end up changing the system DB depending - // on the current state. - // For more information, see the related comment at the beginning of - // planner.makePlan(). - if hasSystemKey && - !(clusterversion.ClusterVersion{Version: args.Version}). - IsActive(clusterversion.DisableSystemConfigGossipTrigger) && - !modifiedSystemConfigSpan { - return roachpb.NewError(errors.Errorf("EndTxn hasSystemKey=%t, but hasSystemConfigTrigger=%t", - hasSystemKey, modifiedSystemConfigSpan)) - } - - return nil -} diff --git a/pkg/sql/tests/server_params.go b/pkg/sql/tests/server_params.go index 8e3746ad15d0..a08c08063dba 100644 --- a/pkg/sql/tests/server_params.go +++ b/pkg/sql/tests/server_params.go @@ -25,7 +25,6 @@ import ( // if the EndTxn checks are important. 
func CreateTestServerParams() (base.TestServerArgs, *CommandFilters) { var cmdFilters CommandFilters - cmdFilters.AppendFilter(CheckEndTxnTrigger, true) params := base.TestServerArgs{} params.Knobs = CreateTestingKnobs() params.Knobs.Store = &kvserver.StoreTestingKnobs{ diff --git a/pkg/sql/txn_restart_test.go b/pkg/sql/txn_restart_test.go index 257c82c79b20..047d8dfdd672 100644 --- a/pkg/sql/txn_restart_test.go +++ b/pkg/sql/txn_restart_test.go @@ -1237,7 +1237,6 @@ func TestFlushUncommitedDescriptorCacheOnRestart(t *testing.T) { defer log.Scope(t).Close(t) var cmdFilters tests.CommandFilters - cmdFilters.AppendFilter(tests.CheckEndTxnTrigger, true) testKey := []byte("test_key") testingKnobs := &kvserver.StoreTestingKnobs{ EvalKnobs: kvserverbase.BatchEvalTestingKnobs{ From 4028bc266fb925d047a69d55192a1e93d692cee9 Mon Sep 17 00:00:00 2001 From: Rafi Shamim Date: Fri, 3 Sep 2021 14:21:37 -0400 Subject: [PATCH 7/9] sql: use uint32 for DOid This resolves a piece of tech debt that has caused us a few surprises and bugs in the past. This doesn't change anything about the on-disk representation -- it just makes it so that OIDs handled in memory are more reliably unsigned 32 bit integers. Release note: None --- pkg/server/admin.go | 2 +- pkg/sql/catalog/schemaexpr/column.go | 2 +- pkg/sql/catalog/seqexpr/sequence.go | 2 +- pkg/sql/evalcatalog/pg_updatable.go | 9 +-- .../testdata/logic_test/builtin_function | 2 +- .../logictest/testdata/logic_test/pgoidtype | 2 +- pkg/sql/opt/constraint/span_test.go | 2 +- pkg/sql/pg_catalog.go | 10 +-- pkg/sql/pg_oid_test.go | 4 +- pkg/sql/pgwire/types.go | 2 +- pkg/sql/resolve_oid.go | 2 +- pkg/sql/row/expr_walker.go | 2 +- pkg/sql/rowenc/keyside/encode.go | 4 +- pkg/sql/rowenc/valueside/array.go | 2 +- pkg/sql/rowenc/valueside/encode.go | 2 +- pkg/sql/rowenc/valueside/legacy.go | 2 +- pkg/sql/sem/builtins/builtins.go | 22 +++---- pkg/sql/sem/builtins/pg_builtins.go | 38 +++++------ pkg/sql/sem/eval/cast.go | 22 +++---- pkg/sql/sem/eval/parse_doid.go | 4 +- pkg/sql/sem/tree/datum.go | 63 +++++++++---------- pkg/sql/user.go | 2 +- pkg/workload/rand/rand.go | 2 +- 23 files changed, 101 insertions(+), 103 deletions(-) diff --git a/pkg/server/admin.go b/pkg/server/admin.go index e22ac4ebd909..7f307e4a3162 100644 --- a/pkg/server/admin.go +++ b/pkg/server/admin.go @@ -3326,7 +3326,7 @@ func (s *adminServer) queryTableID( if row == nil { return descpb.InvalidID, errors.Newf("failed to resolve %q as a table name", tableName) } - return descpb.ID(tree.MustBeDOid(row[0]).DInt), nil + return descpb.ID(tree.MustBeDOid(row[0]).Oid), nil } // Note that the function returns plain errors, and it is the caller's diff --git a/pkg/sql/catalog/schemaexpr/column.go b/pkg/sql/catalog/schemaexpr/column.go index c709be9ab000..fdbce0c8a763 100644 --- a/pkg/sql/catalog/schemaexpr/column.go +++ b/pkg/sql/catalog/schemaexpr/column.go @@ -379,7 +379,7 @@ func GetSeqIDFromExpr(expr tree.Expr) (int64, bool) { } return id, true case *tree.DOid: - return int64(n.DInt), true + return int64(n.Oid), true default: return 0, false } diff --git a/pkg/sql/catalog/seqexpr/sequence.go b/pkg/sql/catalog/seqexpr/sequence.go index c03cf4706069..c1767e254ab0 100644 --- a/pkg/sql/catalog/seqexpr/sequence.go +++ b/pkg/sql/catalog/seqexpr/sequence.go @@ -97,7 +97,7 @@ func getSequenceIdentifier(expr tree.Expr) *SeqIdentifier { SeqName: seqName, } case *tree.DOid: - id := int64(a.DInt) + id := int64(a.Oid) return &SeqIdentifier{ SeqID: id, } diff --git a/pkg/sql/evalcatalog/pg_updatable.go 
b/pkg/sql/evalcatalog/pg_updatable.go index 5eabe58cc9dc..210bf816af57 100644 --- a/pkg/sql/evalcatalog/pg_updatable.go +++ b/pkg/sql/evalcatalog/pg_updatable.go @@ -34,9 +34,11 @@ var ( ) // PGRelationIsUpdatable is part of the eval.CatalogBuiltins interface. -func (b *Builtins) PGRelationIsUpdatable(ctx context.Context, oid *tree.DOid) (*tree.DInt, error) { +func (b *Builtins) PGRelationIsUpdatable( + ctx context.Context, oidArg *tree.DOid, +) (*tree.DInt, error) { tableDesc, err := b.dc.GetImmutableTableByID( - ctx, b.txn, descpb.ID(oid.DInt), tree.ObjectLookupFlagsWithRequired(), + ctx, b.txn, descpb.ID(oidArg.Oid), tree.ObjectLookupFlagsWithRequired(), ) if err != nil { // For postgres compatibility, it is expected that rather returning @@ -62,13 +64,12 @@ func (b *Builtins) PGRelationIsUpdatable(ctx context.Context, oid *tree.DOid) (* func (b *Builtins) PGColumnIsUpdatable( ctx context.Context, oidArg *tree.DOid, attNumArg tree.DInt, ) (*tree.DBool, error) { - oid := descpb.ID(oidArg.DInt) if attNumArg < 0 { // System columns are not updatable. return tree.DBoolFalse, nil } attNum := descpb.PGAttributeNum(attNumArg) - tableDesc, err := b.dc.GetImmutableTableByID(ctx, b.txn, oid, tree.ObjectLookupFlagsWithRequired()) + tableDesc, err := b.dc.GetImmutableTableByID(ctx, b.txn, descpb.ID(oidArg.Oid), tree.ObjectLookupFlagsWithRequired()) if err != nil { if sqlerrors.IsUndefinedRelationError(err) { // For postgres compatibility, it is expected that rather returning diff --git a/pkg/sql/logictest/testdata/logic_test/builtin_function b/pkg/sql/logictest/testdata/logic_test/builtin_function index 8429cca37212..5b307348a007 100644 --- a/pkg/sql/logictest/testdata/logic_test/builtin_function +++ b/pkg/sql/logictest/testdata/logic_test/builtin_function @@ -2368,7 +2368,7 @@ SELECT pg_catalog.length('hello') query OOO SELECT oid(3), oid(0), oid(12023948723) ---- -3 0 12023948723 +3 0 3434014131 query T SELECT to_english(i) FROM (VALUES (1), (13), (617), (-2), (-9223372036854775808)) AS a(i) diff --git a/pkg/sql/logictest/testdata/logic_test/pgoidtype b/pkg/sql/logictest/testdata/logic_test/pgoidtype index 25ce269f676d..fe1a615d3e24 100644 --- a/pkg/sql/logictest/testdata/logic_test/pgoidtype +++ b/pkg/sql/logictest/testdata/logic_test/pgoidtype @@ -420,7 +420,7 @@ SELECT proargtypes::REGTYPE[] FROM pg_proc WHERE proname = 'obj_description' query I SELECT 'trigger'::REGTYPE::INT ---- --1 +0 # Regression test for #41708. diff --git a/pkg/sql/opt/constraint/span_test.go b/pkg/sql/opt/constraint/span_test.go index 0b1515c4c986..e7cec6d97a89 100644 --- a/pkg/sql/opt/constraint/span_test.go +++ b/pkg/sql/opt/constraint/span_test.go @@ -700,7 +700,7 @@ func TestSpan_KeyCount(t *testing.T) { // Multiple key span with DOid datum type. 
keyCtx: kcAscAsc, length: 1, - span: ParseSpan(&evalCtx, "[/-5 - /5]", types.OidFamily), + span: ParseSpan(&evalCtx, "[/0 - /10]", types.OidFamily), expected: "11", }, { // 3 diff --git a/pkg/sql/pg_catalog.go b/pkg/sql/pg_catalog.go index 63ce90cb15a1..43a91cb9286d 100644 --- a/pkg/sql/pg_catalog.go +++ b/pkg/sql/pg_catalog.go @@ -1096,7 +1096,7 @@ func makeAllRelationsVirtualTableWithDescriptorIDIndex( var id descpb.ID switch t := unwrappedConstraint.(type) { case *tree.DOid: - id = descpb.ID(t.DInt) + id = descpb.ID(t.Oid) case *tree.DInt: id = descpb.ID(*t) default: @@ -3021,7 +3021,7 @@ https://www.postgresql.org/docs/9.5/catalog-pg-type.html`, h := makeOidHasher() nspOid := h.NamespaceOid(db.GetID(), pgCatalogName) coid := tree.MustBeDOid(unwrappedConstraint) - ooid := oid.Oid(int(coid.DInt)) + ooid := coid.Oid // Check if it is a predefined type. typ, ok := types.OidToType[ooid] @@ -4167,7 +4167,7 @@ https://www.postgresql.org/docs/9.6/catalog-pg-aggregate.html`, } } } - regprocForZeroOid := tree.NewDOidWithName(tree.DInt(0), types.RegProc, "-") + regprocForZeroOid := tree.NewDOidWithName(0, types.RegProc, "-") err := addRow( h.BuiltinOid(name, &overload).AsRegProc(name), // aggfnoid aggregateKind, // aggkind @@ -4213,7 +4213,7 @@ func init() { for _, o := range def.Definition { if overload, ok := o.(*tree.Overload); ok { builtinOid := h.BuiltinOid(name, overload) - id := oid.Oid(builtinOid.DInt) + id := builtinOid.Oid tree.OidToBuiltinName[id] = name overload.Oid = id } @@ -4283,7 +4283,7 @@ func (h oidHasher) writeUInt64(i uint64) { } func (h oidHasher) writeOID(oid *tree.DOid) { - h.writeUInt64(uint64(oid.DInt)) + h.writeUInt64(uint64(oid.Oid)) } type oidTypeTag uint8 diff --git a/pkg/sql/pg_oid_test.go b/pkg/sql/pg_oid_test.go index a74a5ae3daad..99f21800107b 100644 --- a/pkg/sql/pg_oid_test.go +++ b/pkg/sql/pg_oid_test.go @@ -39,8 +39,8 @@ func TestDefaultOid(t *testing.T) { for _, tc := range testCases { oid := tableOid(tc.id) - if tc.oid.DInt != oid.DInt { - t.Fatalf("expected oid %d(%32b), got %d(%32b)", tc.oid.DInt, tc.oid.DInt, oid.DInt, oid.DInt) + if tc.oid.Oid != oid.Oid { + t.Fatalf("expected oid %d(%32b), got %d(%32b)", tc.oid.Oid, tc.oid.Oid, oid.Oid, oid.Oid) } } } diff --git a/pkg/sql/pgwire/types.go b/pkg/sql/pgwire/types.go index 84939d6884c1..c052872bb6f7 100644 --- a/pkg/sql/pgwire/types.go +++ b/pkg/sql/pgwire/types.go @@ -744,7 +744,7 @@ func writeBinaryDatumNotNull( case *tree.DOid: b.putInt32(4) - b.putInt32(int32(v.DInt)) + b.putInt32(int32(v.Oid)) default: b.setError(errors.AssertionFailedf("unsupported type %T", d)) } diff --git a/pkg/sql/resolve_oid.go b/pkg/sql/resolve_oid.go index 177cf5e370e4..87a970bce0dd 100644 --- a/pkg/sql/resolve_oid.go +++ b/pkg/sql/resolve_oid.go @@ -88,7 +88,7 @@ func resolveOID( "%s %s does not exist", info.objName, toResolve) } return tree.NewDOidWithName( - results[0].(*tree.DOid).DInt, + tree.DInt(results[0].(*tree.DOid).Oid), resultType, tree.AsStringWithFlags(results[1], tree.FmtBareStrings), ), nil diff --git a/pkg/sql/row/expr_walker.go b/pkg/sql/row/expr_walker.go index 5f0c3f26c840..6c5061fc19c2 100644 --- a/pkg/sql/row/expr_walker.go +++ b/pkg/sql/row/expr_walker.go @@ -498,7 +498,7 @@ func importNextVal(evalCtx *eval.Context, args tree.Datums) (tree.Datum, error) func importNextValByID(evalCtx *eval.Context, args tree.Datums) (tree.Datum, error) { c := getCellInfoAnnotation(evalCtx.Annotations) oid := tree.MustBeDOid(args[0]) - seqMetadata, ok := c.seqIDToMetadata[descpb.ID(oid.DInt)] + seqMetadata, ok := 
c.seqIDToMetadata[descpb.ID(oid.Oid)] if !ok { return nil, errors.Newf("sequence with ID %v not found in annotation", oid) } diff --git a/pkg/sql/rowenc/keyside/encode.go b/pkg/sql/rowenc/keyside/encode.go index 171ce5e0225b..59f593fa7557 100644 --- a/pkg/sql/rowenc/keyside/encode.go +++ b/pkg/sql/rowenc/keyside/encode.go @@ -163,9 +163,9 @@ func Encode(b []byte, val tree.Datum, dir encoding.Direction) ([]byte, error) { return encoding.EncodeBitArrayDescending(b, t.BitArray), nil case *tree.DOid: if dir == encoding.Ascending { - return encoding.EncodeVarintAscending(b, int64(t.DInt)), nil + return encoding.EncodeVarintAscending(b, int64(t.Oid)), nil } - return encoding.EncodeVarintDescending(b, int64(t.DInt)), nil + return encoding.EncodeVarintDescending(b, int64(t.Oid)), nil case *tree.DEnum: if dir == encoding.Ascending { return encoding.EncodeBytesAscending(b, t.PhysicalRep), nil diff --git a/pkg/sql/rowenc/valueside/array.go b/pkg/sql/rowenc/valueside/array.go index ae15ff62c4a4..54e9e8e68629 100644 --- a/pkg/sql/rowenc/valueside/array.go +++ b/pkg/sql/rowenc/valueside/array.go @@ -300,7 +300,7 @@ func encodeArrayElement(b []byte, d tree.Datum) ([]byte, error) { case *tree.DIPAddr: return encoding.EncodeUntaggedIPAddrValue(b, t.IPAddr), nil case *tree.DOid: - return encoding.EncodeUntaggedIntValue(b, int64(t.DInt)), nil + return encoding.EncodeUntaggedIntValue(b, int64(t.Oid)), nil case *tree.DCollatedString: return encoding.EncodeUntaggedBytesValue(b, []byte(t.Contents)), nil case *tree.DOidWrapper: diff --git a/pkg/sql/rowenc/valueside/encode.go b/pkg/sql/rowenc/valueside/encode.go index 15e8ef7724fa..11e29d7a1aa4 100644 --- a/pkg/sql/rowenc/valueside/encode.go +++ b/pkg/sql/rowenc/valueside/encode.go @@ -92,7 +92,7 @@ func Encode(appendTo []byte, colID ColumnIDDelta, val tree.Datum, scratch []byte case *tree.DCollatedString: return encoding.EncodeBytesValue(appendTo, uint32(colID), []byte(t.Contents)), nil case *tree.DOid: - return encoding.EncodeIntValue(appendTo, uint32(colID), int64(t.DInt)), nil + return encoding.EncodeIntValue(appendTo, uint32(colID), int64(t.Oid)), nil case *tree.DEnum: return encoding.EncodeBytesValue(appendTo, uint32(colID), t.PhysicalRep), nil case *tree.DVoid: diff --git a/pkg/sql/rowenc/valueside/legacy.go b/pkg/sql/rowenc/valueside/legacy.go index 1620ca1ec5e3..88efa1ea26c6 100644 --- a/pkg/sql/rowenc/valueside/legacy.go +++ b/pkg/sql/rowenc/valueside/legacy.go @@ -169,7 +169,7 @@ func MarshalLegacy(colType *types.T, val tree.Datum) (roachpb.Value, error) { } case types.OidFamily: if v, ok := val.(*tree.DOid); ok { - r.SetInt(int64(v.DInt)) + r.SetInt(int64(v.Oid)) return r, nil } case types.EnumFamily: diff --git a/pkg/sql/sem/builtins/builtins.go b/pkg/sql/sem/builtins/builtins.go index d2267cdfff70..920876571916 100644 --- a/pkg/sql/sem/builtins/builtins.go +++ b/pkg/sql/sem/builtins/builtins.go @@ -2207,7 +2207,7 @@ var builtins = map[string]builtinDefinition{ if err != nil { return nil, err } - res, err := evalCtx.Sequence.IncrementSequenceByID(evalCtx.Ctx(), int64(dOid.DInt)) + res, err := evalCtx.Sequence.IncrementSequenceByID(evalCtx.Ctx(), int64(dOid.Oid)) if err != nil { return nil, err } @@ -2221,7 +2221,7 @@ var builtins = map[string]builtinDefinition{ ReturnType: tree.FixedReturnType(types.Int), Fn: func(evalCtx *eval.Context, args tree.Datums) (tree.Datum, error) { oid := tree.MustBeDOid(args[0]) - res, err := evalCtx.Sequence.IncrementSequenceByID(evalCtx.Ctx(), int64(oid.DInt)) + res, err := 
evalCtx.Sequence.IncrementSequenceByID(evalCtx.Ctx(), int64(oid.Oid)) if err != nil { return nil, err } @@ -2247,7 +2247,7 @@ var builtins = map[string]builtinDefinition{ if err != nil { return nil, err } - res, err := evalCtx.Sequence.GetLatestValueInSessionForSequenceByID(evalCtx.Ctx(), int64(dOid.DInt)) + res, err := evalCtx.Sequence.GetLatestValueInSessionForSequenceByID(evalCtx.Ctx(), int64(dOid.Oid)) if err != nil { return nil, err } @@ -2261,7 +2261,7 @@ var builtins = map[string]builtinDefinition{ ReturnType: tree.FixedReturnType(types.Int), Fn: func(evalCtx *eval.Context, args tree.Datums) (tree.Datum, error) { oid := tree.MustBeDOid(args[0]) - res, err := evalCtx.Sequence.GetLatestValueInSessionForSequenceByID(evalCtx.Ctx(), int64(oid.DInt)) + res, err := evalCtx.Sequence.GetLatestValueInSessionForSequenceByID(evalCtx.Ctx(), int64(oid.Oid)) if err != nil { return nil, err } @@ -2311,7 +2311,7 @@ var builtins = map[string]builtinDefinition{ newVal := tree.MustBeDInt(args[1]) if err := evalCtx.Sequence.SetSequenceValueByID( - evalCtx.Ctx(), uint32(dOid.DInt), int64(newVal), true /* isCalled */); err != nil { + evalCtx.Ctx(), uint32(dOid.Oid), int64(newVal), true /* isCalled */); err != nil { return nil, err } return args[1], nil @@ -2327,7 +2327,7 @@ var builtins = map[string]builtinDefinition{ oid := tree.MustBeDOid(args[0]) newVal := tree.MustBeDInt(args[1]) if err := evalCtx.Sequence.SetSequenceValueByID( - evalCtx.Ctx(), uint32(oid.DInt), int64(newVal), true /* isCalled */); err != nil { + evalCtx.Ctx(), uint32(oid.Oid), int64(newVal), true /* isCalled */); err != nil { return nil, err } return args[1], nil @@ -2351,7 +2351,7 @@ var builtins = map[string]builtinDefinition{ newVal := tree.MustBeDInt(args[1]) if err := evalCtx.Sequence.SetSequenceValueByID( - evalCtx.Ctx(), uint32(dOid.DInt), int64(newVal), isCalled); err != nil { + evalCtx.Ctx(), uint32(dOid.Oid), int64(newVal), isCalled); err != nil { return nil, err } return args[1], nil @@ -2371,7 +2371,7 @@ var builtins = map[string]builtinDefinition{ newVal := tree.MustBeDInt(args[1]) if err := evalCtx.Sequence.SetSequenceValueByID( - evalCtx.Ctx(), uint32(oid.DInt), int64(newVal), isCalled); err != nil { + evalCtx.Ctx(), uint32(oid.Oid), int64(newVal), isCalled); err != nil { return nil, err } return args[1], nil @@ -6498,7 +6498,7 @@ table's zone configuration this will return NULL.`, ReturnType: tree.FixedReturnType(types.Void), Fn: func(evalCtx *eval.Context, args tree.Datums) (tree.Datum, error) { oid := tree.MustBeDOid(args[0]) - if err := evalCtx.Planner.RepairTTLScheduledJobForTable(evalCtx.Ctx(), int64(oid.DInt)); err != nil { + if err := evalCtx.Planner.RepairTTLScheduledJobForTable(evalCtx.Ctx(), int64(oid.Oid)); err != nil { return nil, err } return tree.DVoidDatum, nil @@ -6587,7 +6587,7 @@ in the current database. Returns an error if validation fails.`, if err != nil { return nil, err } - if err := evalCtx.Planner.RevalidateUniqueConstraintsInTable(evalCtx.Ctx(), int(dOid.DInt)); err != nil { + if err := evalCtx.Planner.RevalidateUniqueConstraintsInTable(evalCtx.Ctx(), int(dOid.Oid)); err != nil { return nil, err } return tree.DVoidDatum, nil @@ -6613,7 +6613,7 @@ table. 
@@ -6613,7 +6613,7 @@ table. Returns an error if validation fails.`,
 				return nil, err
 			}
 			if err = evalCtx.Planner.RevalidateUniqueConstraint(
-				evalCtx.Ctx(), int(dOid.DInt), string(constraintName),
+				evalCtx.Ctx(), int(dOid.Oid), string(constraintName),
 			); err != nil {
 				return nil, err
 			}
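One subtlety worth noting (illustrative standalone snippet with invented
values; not part of the patch): the old `uint32(dOid.DInt)` conversions in the
sequence builtins truncated an int64 silently, whereas `uint32(dOid.Oid)` is a
no-op because oid.Oid is already a uint32.

    package main

    import "fmt"

    func main() {
        // Old representation: an int64 that may not fit in 32 bits.
        old := int64(1 << 33)
        fmt.Println(uint32(old)) // 0 -- silent truncation
        // New representation: already a uint32, so nothing can be lost.
        var o uint32 = 42
        fmt.Println(uint32(o)) // 42
    }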
diff --git a/pkg/sql/sem/builtins/pg_builtins.go b/pkg/sql/sem/builtins/pg_builtins.go
index 9e09db8f9acb..e6dad046c2c2 100644
--- a/pkg/sql/sem/builtins/pg_builtins.go
+++ b/pkg/sql/sem/builtins/pg_builtins.go
@@ -654,7 +654,7 @@ var pgBuiltins = map[string]builtinDefinition{
 			t, err := ctx.Planner.QueryRowEx(
 				ctx.Ctx(), "pg_get_function_result",
 				sessiondata.NoSessionDataOverride,
-				`SELECT prorettype::REGTYPE::TEXT FROM pg_proc WHERE oid=$1`, int(funcOid.DInt))
+				`SELECT prorettype::REGTYPE::TEXT FROM pg_proc WHERE oid=$1`, funcOid.Oid)
 			if err != nil {
 				return nil, err
 			}
@@ -681,7 +681,7 @@ var pgBuiltins = map[string]builtinDefinition{
 			t, err := ctx.Planner.QueryRowEx(
 				ctx.Ctx(), "pg_get_function_identity_arguments",
 				sessiondata.NoSessionDataOverride,
-				`SELECT array_agg(unnest(proargtypes)::REGTYPE::TEXT) FROM pg_proc WHERE oid=$1`, int(funcOid.DInt))
+				`SELECT array_agg(unnest(proargtypes)::REGTYPE::TEXT) FROM pg_proc WHERE oid=$1`, funcOid.Oid)
 			if err != nil {
 				return nil, err
 			}
@@ -930,7 +930,7 @@ var pgBuiltins = map[string]builtinDefinition{
 				return tree.DNull, nil
 			}
 			maybeTypmod := args[1]
-			oid := oid.Oid(oidArg.(*tree.DOid).DInt)
+			oid := oidArg.(*tree.DOid).Oid
 			typ, ok := types.OidToType[oid]
 			if !ok {
 				// If the type wasn't statically known, try looking it up as a user
@@ -1009,7 +1009,7 @@ WHERE c.type=$1::int AND c.object_id=$2::int AND c.sub_id=$3::int LIMIT 1
 		Types:      tree.ArgTypes{{"object_oid", types.Oid}},
 		ReturnType: tree.FixedReturnType(types.String),
 		Fn: func(ctx *eval.Context, args tree.Datums) (tree.Datum, error) {
-			return getPgObjDesc(ctx, "", int(args[0].(*tree.DOid).DInt))
+			return getPgObjDesc(ctx, "", args[0].(*tree.DOid).Oid)
 		},
 		Info:       notUsableInfo,
 		Volatility: volatility.Stable,
@@ -1020,7 +1020,7 @@ WHERE c.type=$1::int AND c.object_id=$2::int AND c.sub_id=$3::int LIMIT 1
 		Fn: func(ctx *eval.Context, args tree.Datums) (tree.Datum, error) {
 			return getPgObjDesc(ctx,
 				string(tree.MustBeDString(args[1])),
-				int(args[0].(*tree.DOid).DInt),
+				args[0].(*tree.DOid).Oid,
 			)
 		},
 		Info:       notUsableInfo,
@@ -1046,7 +1046,7 @@ WHERE c.type=$1::int AND c.object_id=$2::int AND c.sub_id=$3::int LIMIT 1
 		ReturnType: tree.FixedReturnType(types.String),
 		Fn: func(ctx *eval.Context, args tree.Datums) (tree.Datum, error) {
 			catalogName := string(tree.MustBeDString(args[1]))
-			objOid := int(args[0].(*tree.DOid).DInt)
+			objOid := args[0].(*tree.DOid).Oid

 			classOid, ok := getCatalogOidForComments(catalogName)
 			if !ok {
@@ -1131,7 +1131,7 @@ SELECT description
 			t, err := ctx.Planner.QueryRowEx(
 				ctx.Ctx(), "pg_function_is_visible",
 				sessiondata.NoSessionDataOverride,
-				"SELECT * from pg_proc WHERE oid=$1 LIMIT 1", int(oid.DInt))
+				"SELECT * from pg_proc WHERE oid=$1 LIMIT 1", oid.Oid)
 			if err != nil {
 				return nil, err
 			}
@@ -1154,7 +1154,7 @@ SELECT description
 		Fn: func(ctx *eval.Context, args tree.Datums) (tree.Datum, error) {
 			oidArg := tree.MustBeDOid(args[0])
 			isVisible, exists, err := ctx.Planner.IsTableVisible(
-				ctx.Context, ctx.SessionData().Database, ctx.SessionData().SearchPath, oid.Oid(oidArg.DInt),
+				ctx.Context, ctx.SessionData().Database, ctx.SessionData().SearchPath, oidArg.Oid,
 			)
 			if err != nil {
 				return nil, err
@@ -1182,7 +1182,7 @@ SELECT description
 		Fn: func(ctx *eval.Context, args tree.Datums) (tree.Datum, error) {
 			oidArg := tree.MustBeDOid(args[0])
 			isVisible, exists, err := ctx.Planner.IsTypeVisible(
-				ctx.Context, ctx.SessionData().Database, ctx.SessionData().SearchPath, oid.Oid(oidArg.DInt),
+				ctx.Context, ctx.SessionData().Database, ctx.SessionData().SearchPath, oidArg.Oid,
 			)
 			if err != nil {
 				return nil, err
@@ -1940,7 +1940,7 @@ SELECT description
 		},
 		ReturnType: tree.FixedReturnType(types.Int),
 		Fn: func(ctx *eval.Context, args tree.Datums) (tree.Datum, error) {
-			typid := oid.Oid(args[0].(*tree.DOid).DInt)
+			typid := args[0].(*tree.DOid).Oid
 			typmod := *args[1].(*tree.DInt)
 			if typmod == -1 {
 				return tree.DNull, nil
@@ -2010,7 +2010,7 @@ SELECT description
 		},
 		ReturnType: tree.FixedReturnType(types.Int),
 		Fn: func(ctx *eval.Context, args tree.Datums) (tree.Datum, error) {
-			typid := oid.Oid(tree.MustBeDOid(args[0]).DInt)
+			typid := tree.MustBeDOid(args[0]).Oid
 			typmod := tree.MustBeDInt(args[1])
 			switch typid {
 			case oid.T_int2:
@@ -2047,7 +2047,7 @@ SELECT description
 		},
 		ReturnType: tree.FixedReturnType(types.Int),
 		Fn: func(ctx *eval.Context, args tree.Datums) (tree.Datum, error) {
-			typid := oid.Oid(tree.MustBeDOid(args[0]).DInt)
+			typid := tree.MustBeDOid(args[0]).Oid
 			if typid == oid.T_int2 || typid == oid.T_int4 || typid == oid.T_int8 || typid == oid.T_float4 || typid == oid.T_float8 {
 				return tree.NewDInt(2), nil
 			} else if typid == oid.T_numeric {
@@ -2069,7 +2069,7 @@ SELECT description
 		},
 		ReturnType: tree.FixedReturnType(types.Int),
 		Fn: func(ctx *eval.Context, args tree.Datums) (tree.Datum, error) {
-			typid := oid.Oid(tree.MustBeDOid(args[0]).DInt)
+			typid := tree.MustBeDOid(args[0]).Oid
 			typmod := tree.MustBeDInt(args[1])
 			if typid == oid.T_int2 || typid == oid.T_int4 || typid == oid.T_int8 {
 				return tree.NewDInt(0), nil
@@ -2136,7 +2136,7 @@ func getCatalogOidForComments(catalogName string) (id int, ok bool) {
 // getPgObjDesc queries pg_description for object comments. catalog_name, if not
 // empty, provides a constraint on which "system catalog" the comment is in.
 // System catalogs are things like pg_class, pg_type, pg_database, and so on.
-func getPgObjDesc(ctx *eval.Context, catalogName string, oid int) (tree.Datum, error) {
+func getPgObjDesc(ctx *eval.Context, catalogName string, oidVal oid.Oid) (tree.Datum, error) {
 	classOidFilter := ""
 	if catalogName != "" {
 		classOid, ok := getCatalogOidForComments(catalogName)
@@ -2156,7 +2156,7 @@ SELECT description
    AND objsubid = 0
    %[2]s
   LIMIT 1`,
-		oid,
+		oidVal,
 		classOidFilter,
 	))
 	if err != nil {
@@ -2175,8 +2175,8 @@ func databaseHasPrivilegeSpecifier(databaseArg tree.Datum) (eval.HasPrivilegeSpe
 		s := string(*t)
 		specifier.DatabaseName = &s
 	case *tree.DOid:
-		oid := oid.Oid(t.DInt)
-		specifier.DatabaseOID = &oid
+		oidVal := t.Oid
+		specifier.DatabaseOID = &oidVal
 	default:
 		return specifier, errors.AssertionFailedf("unknown privilege specifier: %#v", databaseArg)
 	}
@@ -2196,8 +2196,8 @@ func tableHasPrivilegeSpecifier(
 		s := string(*t)
 		specifier.TableName = &s
 	case *tree.DOid:
-		oid := oid.Oid(t.DInt)
-		specifier.TableOID = &oid
+		oidVal := t.Oid
+		specifier.TableOID = &oidVal
 	default:
 		return specifier, errors.AssertionFailedf("unknown privilege specifier: %#v", tableArg)
 	}
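The getPgObjDesc signature change above follows the same theme (sketch with
invented names; the local Oid type stands in for the oid.Oid used in the
patch): accepting the typed OID pushes the 32-bit semantics into the type
system instead of relying on every call site to convert correctly.

    package main

    import "fmt"

    // Oid is a stand-in for the oid.Oid type used in the patch.
    type Oid uint32

    func describe(o Oid) string { return fmt.Sprintf("oid=%d", o) }

    func main() {
        // A caller cannot accidentally pass a raw int here; the compiler
        // requires an explicit, visible conversion.
        fmt.Println(describe(Oid(4294967295))) // oid=4294967295
    }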
diff --git a/pkg/sql/sem/eval/cast.go b/pkg/sql/sem/eval/cast.go
index eac5408ee8d7..8e0f372c8d7c 100644
--- a/pkg/sql/sem/eval/cast.go
+++ b/pkg/sql/sem/eval/cast.go
@@ -231,7 +231,7 @@ func performCastWithoutPrecisionTruncation(
 		}
 		res = tree.NewDInt(tree.DInt(iv))
 	case *tree.DOid:
-		res = &v.DInt
+		res = tree.NewDInt(tree.DInt(v.Oid))
 	case *tree.DJSON:
 		dec, ok := v.AsDecimal()
 		if !ok {
@@ -856,13 +856,9 @@ func performCastWithoutPrecisionTruncation(
 	case types.OidFamily:
 		switch v := d.(type) {
 		case *tree.DOid:
-			return performIntToOidCast(ctx.Ctx(), ctx.Planner, t, v.DInt)
+			return performIntToOidCast(ctx.Ctx(), ctx.Planner, t, tree.DInt(v.Oid))
 		case *tree.DInt:
-			// OIDs are always unsigned 32-bit integers. Some languages, like Java,
-			// store OIDs as signed 32-bit integers, so we implement the cast
-			// by converting to a uint32 first. This matches Postgres behavior.
-			i := tree.DInt(uint32(*v))
-			return performIntToOidCast(ctx.Ctx(), ctx.Planner, t, i)
+			return performIntToOidCast(ctx.Ctx(), ctx.Planner, t, *v)
 		case *tree.DString:
 			if t.Oid() != oid.T_oid && string(*v) == tree.ZeroOidValue {
 				return tree.WrapAsZeroOid(t), nil
@@ -912,16 +908,20 @@ func performCastWithoutPrecisionTruncation(
 func performIntToOidCast(
 	ctx context.Context, res TypeResolver, t *types.T, v tree.DInt,
 ) (tree.Datum, error) {
+	// OIDs are always unsigned 32-bit integers. Some languages, like Java,
+	// store OIDs as signed 32-bit integers, so we implement the cast
+	// by converting to a uint32 first. This matches Postgres behavior.
+	o := oid.Oid(v)
 	switch t.Oid() {
 	case oid.T_oid:
 		return tree.NewDOidWithType(v, t), nil
 	case oid.T_regtype:
 		// Mapping an dOid to a regtype is easy: we have a hardcoded map.
 		var name string
-		if typ, ok := types.OidToType[oid.Oid(v)]; ok {
+		if typ, ok := types.OidToType[o]; ok {
 			name = typ.PGName()
-		} else if types.IsOIDUserDefinedType(oid.Oid(v)) {
-			typ, err := res.ResolveTypeByOID(ctx, oid.Oid(v))
+		} else if types.IsOIDUserDefinedType(o) {
+			typ, err := res.ResolveTypeByOID(ctx, o)
 			if err != nil {
 				return nil, err
 			}
@@ -933,7 +933,7 @@ func performIntToOidCast(
 	case oid.T_regproc, oid.T_regprocedure:
 		// Mapping an dOid to a regproc is easy: we have a hardcoded map.
-		name, ok := tree.OidToBuiltinName[oid.Oid(v)]
+		name, ok := tree.OidToBuiltinName[o]
 		if !ok {
 			if v == 0 {
 				return tree.WrapAsZeroOid(t), nil
diff --git a/pkg/sql/sem/eval/parse_doid.go b/pkg/sql/sem/eval/parse_doid.go
index d2e5b794a027..1bb600a72801 100644
--- a/pkg/sql/sem/eval/parse_doid.go
+++ b/pkg/sql/sem/eval/parse_doid.go
@@ -117,9 +117,9 @@ func ParseDOid(ctx *Context, s string, t *types.T) (*tree.DOid, error) {
 		default:
 			return nil, missingTypeErr
 		}
-		// Types we don't support get OID -1, so they won't match anything
+		// Types we don't support get OID 0, so they won't match anything
 		// in catalogs.
-		return tree.NewDOidWithTypeAndName(-1, t, s), nil
+		return tree.NewDOidWithTypeAndName(0, t, s), nil

 	case oid.T_regclass:
 		tn, err := castStringToRegClassTableName(s)
diff --git a/pkg/sql/sem/tree/datum.go b/pkg/sql/sem/tree/datum.go
index c26f62156090..b8838f3c1dd0 100644
--- a/pkg/sql/sem/tree/datum.go
+++ b/pkg/sql/sem/tree/datum.go
@@ -760,7 +760,7 @@ func (d *DInt) CompareError(ctx CompareContext, other Datum) (int, error) {
 		// compare OIDs to signed 32-bit integers, so we implement the comparison
 		// by converting to a uint32 first. This matches Postgres behavior.
 		thisInt = DInt(uint32(thisInt))
-		v = t.DInt
+		v = DInt(t.Oid)
 	default:
 		return 0, makeUnsupportedComparisonMessage(d, other)
 	}
@@ -4860,12 +4860,11 @@ func (d *DEnum) MinWriteable() (Datum, bool) {
 // of the reg* types, such as regproc or regclass. An OID must only be
 // 32 bits, since this width encoding is enforced in the pgwire protocol.
 // OIDs are not guaranteed to be globally unique.
-// TODO(rafi): make this use a uint32 instead of a DInt.
 type DOid struct {
-	// A DOid embeds a DInt, the underlying integer OID for this OID datum.
-	DInt
+	// Oid is an oid.Oid, the underlying integer OID for this OID datum.
+	Oid oid.Oid
 	// semanticType indicates the particular variety of OID this datum is, whether raw
-	// oid or a reg* type.
+	// Oid or a reg* type.
 	semanticType *types.T
 	// name is set to the resolved name of this OID, if available.
 	name string
@@ -4873,18 +4872,18 @@ type DOid struct {

 // MakeDOid is a helper routine to create a DOid initialized from a DInt.
 func MakeDOid(d DInt, semanticType *types.T) DOid {
-	return DOid{DInt: d, semanticType: semanticType, name: ""}
+	return DOid{Oid: oid.Oid(d), semanticType: semanticType, name: ""}
 }

 // NewDOidWithType constructs a DOid with the given type and no name.
 func NewDOidWithType(d DInt, semanticType *types.T) *DOid {
-	oid := DOid{DInt: d, semanticType: semanticType}
+	oid := DOid{Oid: oid.Oid(d), semanticType: semanticType}
 	return &oid
 }

 // NewDOidWithTypeAndName constructs a DOid with the given type and name.
 func NewDOidWithTypeAndName(d DInt, semanticType *types.T, name string) *DOid {
-	oid := DOid{DInt: d, semanticType: semanticType, name: name}
+	oid := DOid{Oid: oid.Oid(d), semanticType: semanticType, name: name}
 	return &oid
 }
@@ -4924,7 +4923,7 @@ func MustBeDOid(e Expr) *DOid {
 // and a string.
 func NewDOidWithName(d DInt, typ *types.T, name string) *DOid {
 	return &DOid{
-		DInt:         d,
+		Oid:          oid.Oid(d),
 		semanticType: typ,
 		name:         name,
 	}
@@ -4956,23 +4955,23 @@ func (d *DOid) CompareError(ctx CompareContext, other Datum) (int, error) {
 		// NULL is less than any non-NULL value.
 		return 1, nil
 	}
-	var v DInt
+	var v oid.Oid
 	switch t := ctx.UnwrapDatum(other).(type) {
 	case *DOid:
-		v = t.DInt
+		v = t.Oid
 	case *DInt:
 		// OIDs are always unsigned 32-bit integers. Some languages, like Java,
 		// compare OIDs to signed 32-bit integers, so we implement the comparison
 		// by converting to a uint32 first. This matches Postgres behavior.
-		v = DInt(uint32(*t))
+		v = oid.Oid(*t)
 	default:
 		return 0, makeUnsupportedComparisonMessage(d, other)
 	}
-	if d.DInt < v {
+	if d.Oid < v {
 		return -1, nil
 	}
-	if d.DInt > v {
+	if d.Oid > v {
 		return 1, nil
 	}
 	return 0, nil
@@ -4980,18 +4979,14 @@ func (d *DOid) CompareError(ctx CompareContext, other Datum) (int, error) {

 // Format implements the Datum interface.
 func (d *DOid) Format(ctx *FmtCtx) {
+	s := strconv.FormatUint(uint64(d.Oid), 10)
 	if d.semanticType.Oid() == oid.T_oid || d.name == "" {
-		// If we call FormatNode directly when the disambiguateDatumTypes flag
-		// is set, then we get something like 123:::INT:::OID. This is the
-		// important flag set by FmtParsable which is supposed to be
-		// roundtrippable. Since in this branch, a DOid is a thin wrapper around
-		// a DInt, I _think_ it's correct to just delegate to the DInt's Format.
-		d.DInt.Format(ctx)
+		ctx.WriteString(s)
 	} else if ctx.HasFlags(fmtDisambiguateDatumTypes) {
 		ctx.WriteString("crdb_internal.create_")
 		ctx.WriteString(d.semanticType.SQLStandardName())
 		ctx.WriteByte('(')
-		d.DInt.Format(ctx)
+		ctx.WriteString(s)
 		ctx.WriteByte(',')
 		lexbase.EncodeSQLStringWithFlags(&ctx.Buffer, d.name, lexbase.EncNoFlags)
 		ctx.WriteByte(')')
@@ -5003,21 +4998,25 @@ func (d *DOid) Format(ctx *FmtCtx) {
 }

 // IsMax implements the Datum interface.
-func (d *DOid) IsMax(ctx CompareContext) bool { return d.DInt.IsMax(ctx) }
+func (d *DOid) IsMax(ctx CompareContext) bool {
+	return d.Oid == math.MaxUint32
+}

 // IsMin implements the Datum interface.
-func (d *DOid) IsMin(ctx CompareContext) bool { return d.DInt.IsMin(ctx) }
+func (d *DOid) IsMin(ctx CompareContext) bool {
+	return d.Oid == 0
+}

 // Next implements the Datum interface.
 func (d *DOid) Next(ctx CompareContext) (Datum, bool) {
-	next, ok := d.DInt.Next(ctx)
-	return &DOid{*next.(*DInt), d.semanticType, ""}, ok
+	next := d.Oid + 1
+	return &DOid{next, d.semanticType, ""}, true
 }

 // Prev implements the Datum interface.
 func (d *DOid) Prev(ctx CompareContext) (Datum, bool) {
-	prev, ok := d.DInt.Prev(ctx)
-	return &DOid{*prev.(*DInt), d.semanticType, ""}, ok
+	prev := d.Oid - 1
+	return &DOid{prev, d.semanticType, ""}, true
 }

 // ResolvedType implements the Datum interface.
@@ -5030,14 +5029,12 @@ func (d *DOid) Size() uintptr { return unsafe.Sizeof(*d) }

 // Max implements the Datum interface.
 func (d *DOid) Max(ctx CompareContext) (Datum, bool) {
-	max, ok := d.DInt.Max(ctx)
-	return &DOid{*max.(*DInt), d.semanticType, ""}, ok
+	return &DOid{math.MaxUint32, d.semanticType, ""}, true
 }

 // Min implements the Datum interface.
 func (d *DOid) Min(ctx CompareContext) (Datum, bool) {
-	min, ok := d.DInt.Min(ctx)
-	return &DOid{*min.(*DInt), d.semanticType, ""}, ok
+	return &DOid{0, d.semanticType, ""}, true
 }

 // DOidWrapper is a Datum implementation which is a wrapper around a Datum, allowing
@@ -5467,8 +5464,8 @@ func MaxDistinctCount(evalCtx CompareContext, first, last Datum) (_ int64, ok bo
 	case *DOid:
 		otherDOid, otherOk := AsDOid(last)
 		if otherOk {
-			start = int64((*t).DInt)
-			end = int64(otherDOid.DInt)
+			start = int64(t.Oid)
+			end = int64(otherDOid.Oid)
 		}

 	case *DDate:
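A note on the new Next/Prev implementations (standalone sketch of Go's
unsigned arithmetic, not CockroachDB code): `d.Oid + 1` and `d.Oid - 1` wrap
around at the ends of the uint32 range, which is why IsMax and IsMin are now
defined directly against math.MaxUint32 and 0.

    package main

    import (
        "fmt"
        "math"
    )

    func main() {
        var max uint32 = math.MaxUint32
        fmt.Println(max + 1) // 0: unsigned addition wraps
        var min uint32 = 0
        fmt.Println(min - 1) // 4294967295: unsigned subtraction wraps
    }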
diff --git a/pkg/sql/user.go b/pkg/sql/user.go
index e103a377d401..a1b166578ea0 100644
--- a/pkg/sql/user.go
+++ b/pkg/sql/user.go
@@ -378,7 +378,7 @@ WHERE
 	var ok bool
 	for ok, err = defaultSettingsIt.Next(ctx); ok; ok, err = defaultSettingsIt.Next(ctx) {
 		row := defaultSettingsIt.Cur()
-		fetechedDatabaseID := descpb.ID(tree.MustBeDOid(row[0]).DInt)
+		fetechedDatabaseID := descpb.ID(tree.MustBeDOid(row[0]).Oid)
 		fetchedUsername := username.MakeSQLUsernameFromPreNormalizedString(string(tree.MustBeDString(row[1])))
 		settingsDatum := tree.MustBeDArray(row[2])
 		fetchedSettings := make([]string, settingsDatum.Len())
diff --git a/pkg/workload/rand/rand.go b/pkg/workload/rand/rand.go
index ca6972b845fc..47813ff0b9bc 100644
--- a/pkg/workload/rand/rand.go
+++ b/pkg/workload/rand/rand.go
@@ -334,7 +334,7 @@ func DatumToGoSQL(d tree.Datum) (interface{}, error) {
 	case *tree.DInt:
 		return int64(*d), nil
 	case *tree.DOid:
-		return int(d.DInt), nil
+		return uint32(d.Oid), nil
 	case *tree.DFloat:
 		return float64(*d), nil
 	case *tree.DDecimal:

From 7c1989312cc40843b8f96570dd69ffad8319cd9c Mon Sep 17 00:00:00 2001
From: Rafi Shamim
Date: Mon, 6 Jun 2022 13:47:27 -0400
Subject: [PATCH 8/9] eval: show error when casting to OID out of range

Release note (sql change): Casting an integer to OID, or calling the `oid`
builtin function, with a value that does not fit in 32 bits now results in
an error. Specifically, the range of valid inputs is [MinInt32, MaxUint32].
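A condensed sketch of the validation this patch adds (simplified and
standalone; the real code lives in performIntToOidCast and returns a
pgcode.NumericValueOutOfRange error via pgerror):

    package main

    import (
        "fmt"
        "math"
    )

    func checkOidRange(v int64) error {
        if v > math.MaxUint32 || v < math.MinInt32 {
            return fmt.Errorf("OID out of range: %d", v)
        }
        return nil
    }

    func main() {
        fmt.Println(checkOidRange(4294967295)) // <nil>: MaxUint32 is valid
        fmt.Println(checkOidRange(4294967296)) // OID out of range: 4294967296
    }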
---
 .../testdata/logic_test/builtin_function | 16 ++++++++--
 pkg/sql/sem/builtins/pg_builtins.go      |  4 +--
 pkg/sql/sem/eval/cast.go                 |  3 ++
 pkg/sql/sem/eval/testdata/eval/cast      | 29 +++++++++++++++++++
 4 files changed, 47 insertions(+), 5 deletions(-)

diff --git a/pkg/sql/logictest/testdata/logic_test/builtin_function b/pkg/sql/logictest/testdata/logic_test/builtin_function
index 5b307348a007..7a9d15d711a7 100644
--- a/pkg/sql/logictest/testdata/logic_test/builtin_function
+++ b/pkg/sql/logictest/testdata/logic_test/builtin_function
@@ -2365,10 +2365,20 @@ SELECT pg_catalog.length('hello')
 ----
 5

-query OOO
-SELECT oid(3), oid(0), oid(12023948723)
+# -2147483648 is MinInt32.
+# 4294967295 is MaxUint32.
+query OOOOO
+SELECT oid(3), oid(0), (-1)::oid, (-2147483648)::oid, (4294967295)::oid
 ----
-3 0 3434014131
+3 0 4294967295 2147483648 4294967295
+
+# -2147483649 is (MinInt32 - 1).
+query error OID out of range: -2147483649
+SELECT oid(-2147483649)
+
+# 4294967296 is (MaxUint32 + 1).
+query error OID out of range: 4294967296
+SELECT oid(4294967296)

 query T
 SELECT to_english(i) FROM (VALUES (1), (13), (617), (-2), (-9223372036854775808)) AS a(i)
diff --git a/pkg/sql/sem/builtins/pg_builtins.go b/pkg/sql/sem/builtins/pg_builtins.go
index e6dad046c2c2..aac813137d05 100644
--- a/pkg/sql/sem/builtins/pg_builtins.go
+++ b/pkg/sql/sem/builtins/pg_builtins.go
@@ -1032,8 +1032,8 @@ WHERE c.type=$1::int AND c.object_id=$2::int AND c.sub_id=$3::int LIMIT 1
 	tree.Overload{
 		Types:      tree.ArgTypes{{"int", types.Int}},
 		ReturnType: tree.FixedReturnType(types.Oid),
-		Fn: func(_ *eval.Context, args tree.Datums) (tree.Datum, error) {
-			return tree.NewDOid(*args[0].(*tree.DInt)), nil
+		Fn: func(evalCtx *eval.Context, args tree.Datums) (tree.Datum, error) {
+			return eval.PerformCast(evalCtx, args[0], types.Oid)
 		},
 		Info:       "Converts an integer to an OID.",
 		Volatility: volatility.Immutable,
diff --git a/pkg/sql/sem/eval/cast.go b/pkg/sql/sem/eval/cast.go
index 8e0f372c8d7c..3ed4cf86ebc5 100644
--- a/pkg/sql/sem/eval/cast.go
+++ b/pkg/sql/sem/eval/cast.go
@@ -911,6 +911,9 @@ func performIntToOidCast(
 	// OIDs are always unsigned 32-bit integers. Some languages, like Java,
 	// store OIDs as signed 32-bit integers, so we implement the cast
 	// by converting to a uint32 first. This matches Postgres behavior.
+	if v > math.MaxUint32 || v < math.MinInt32 {
+		return nil, pgerror.Newf(pgcode.NumericValueOutOfRange, "OID out of range: %d", v)
+	}
 	o := oid.Oid(v)
 	switch t.Oid() {
 	case oid.T_oid:
diff --git a/pkg/sql/sem/eval/testdata/eval/cast b/pkg/sql/sem/eval/testdata/eval/cast
index 4e293cd97713..5675fb48e938 100644
--- a/pkg/sql/sem/eval/testdata/eval/cast
+++ b/pkg/sql/sem/eval/testdata/eval/cast
@@ -1376,3 +1376,32 @@ eval
 '2'::BIT
 ----
 could not parse string as bit array: "2" is not a valid binary digit
+
+eval
+(-1)::oid
+----
+4294967295
+
+# -2147483648 is MinInt32.
+eval
+(-2147483648)::oid
+----
+2147483648
+
+# -2147483649 is (MinInt32 - 1).
+eval
+(-2147483649)::oid
+----
+OID out of range: -2147483649
+
+# 4294967295 is MaxUint32.
+eval
+(4294967295)::oid
+----
+4294967295
+
+# 4294967296 is (MaxUint32 + 1).
+eval
+(4294967296)::oid
+----
+OID out of range: 4294967296

From dde16a878a25813a6413e9e417e286a65f8471ce Mon Sep 17 00:00:00 2001
From: Rafi Shamim
Date: Mon, 6 Jun 2022 16:54:40 -0400
Subject: [PATCH 9/9] eval: show error when comparing OID to int out of range

Release note (sql change): An error is now returned if an OID is compared
to an integer that lies outside the allowable range of OID input values,
which is [MinInt32, MaxUint32].
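The comparison rule, as a rough standalone sketch (the helper name is
invented; the real checks live in DInt.CompareError and DOid.CompareError
below): the integer side is first validated against the OID input range, then
compared as an unsigned 32-bit value, matching Postgres.

    package main

    import (
        "fmt"
        "math"
    )

    func compareOidInt(o uint32, i int64) (int, error) {
        if i > math.MaxUint32 || i < math.MinInt32 {
            return 0, fmt.Errorf("OID out of range: %d", i)
        }
        v := uint32(i) // negative inputs map to their unsigned bit pattern
        switch {
        case o < v:
            return -1, nil
        case o > v:
            return 1, nil
        }
        return 0, nil
    }

    func main() {
        fmt.Println(compareOidInt(1, -2147483648)) // -1 <nil>: -2^31 compares as 2147483648
        fmt.Println(compareOidInt(1, 4294967296))  // 0 OID out of range: 4294967296
    }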
---
 .../logictest/testdata/logic_test/pgoidtype | 37 ++++++++++++++++
 pkg/sql/sem/eval/binary_op.go               |  5 ++-
 pkg/sql/sem/eval/testdata/eval/oid          | 44 +++++++++++++++++++
 pkg/sql/sem/tree/datum.go                   |  6 +++
 4 files changed, 91 insertions(+), 1 deletion(-)

diff --git a/pkg/sql/logictest/testdata/logic_test/pgoidtype b/pkg/sql/logictest/testdata/logic_test/pgoidtype
index fe1a615d3e24..695763392b48 100644
--- a/pkg/sql/logictest/testdata/logic_test/pgoidtype
+++ b/pkg/sql/logictest/testdata/logic_test/pgoidtype
@@ -525,3 +525,40 @@ regression_62205

 # Check we error as appropriate if the OID type is not legit.
 statement error pgcode 22P02 invalid input syntax for type oid: "regression_69907"
 SELECT 'regression_69907'::oid
+
+# 4294967295 is MaxUint32.
+# -2147483648 is MinInt32.
+query OIBB
+SELECT o, i, o > i, i > o FROM (VALUES
+  (1::oid, 4294967295::int8),
+  (1::oid, -2147483648::int8),
+  ((-1)::oid, 4294967295::int8),
+  ((-1)::oid, -2147483648::int8),
+  ((-2147483648)::oid, 4294967295::int8),
+  ((-2147483648)::oid, -2147483648::int8),
+  (4294967295::oid, 4294967295::int8),
+  (4294967295::oid, -2147483648::int8)
+) tbl(o, i)
+----
+1           4294967295   false  true
+1           -2147483648  false  true
+4294967295  4294967295   false  false
+4294967295  -2147483648  true   false
+2147483648  4294967295   false  true
+2147483648  -2147483648  false  false
+4294967295  4294967295   false  false
+4294967295  -2147483648  true   false
+
+# 4294967296 is (MaxUint32 + 1).
+query error OID out of range: 4294967296
+SELECT 1:::OID >= 4294967296:::INT8
+
+query error OID out of range: 4294967296
+SELECT 4294967296:::INT8 >= 1:::OID
+
+# -2147483649 is (MinInt32 - 1).
+query error OID out of range: -2147483649
+SELECT 1:::OID >= -2147483649:::INT8
+
+query error OID out of range: -2147483649
+SELECT -2147483649:::INT8 >= 1:::OID
diff --git a/pkg/sql/sem/eval/binary_op.go b/pkg/sql/sem/eval/binary_op.go
index 7b351578fabd..3f9efed32b27 100644
--- a/pkg/sql/sem/eval/binary_op.go
+++ b/pkg/sql/sem/eval/binary_op.go
@@ -140,7 +140,10 @@ func (e *evaluator) EvalCompareScalarOp(
 			return tree.DNull, nil
 		}
 	}
-	cmp := left.Compare(e.ctx(), right)
+	cmp, err := left.CompareError(e.ctx(), right)
+	if err != nil {
+		return nil, err
+	}
 	return boolFromCmp(cmp, op.ComparisonOperator), nil
 }

diff --git a/pkg/sql/sem/eval/testdata/eval/oid b/pkg/sql/sem/eval/testdata/eval/oid
index 8fed6b1bf7b0..bcc755002cf9 100644
--- a/pkg/sql/sem/eval/testdata/eval/oid
+++ b/pkg/sql/sem/eval/testdata/eval/oid
@@ -82,3 +82,47 @@ eval
 1:::OID <= -3:::INT
 ----
 true
+
+# 4294967295 is MaxUint32.
+eval
+1:::OID >= 4294967295:::INT8
+----
+false
+
+eval
+4294967295:::INT8 >= 1:::OID
+----
+true
+
+# 4294967296 is (MaxUint32 + 1).
+eval
+1:::OID >= 4294967296:::INT8
+----
+OID out of range: 4294967296
+
+eval
+4294967296:::INT8 >= 1:::OID
+----
+OID out of range: 4294967296
+
+# -2147483648 is MinInt32.
+eval
+1:::OID >= -2147483648:::INT8
+----
+false
+
+eval
+-2147483648:::INT8 >= 1:::OID
+----
+true
+
+# -2147483649 is (MinInt32 - 1).
+eval
+1:::OID >= -2147483649:::INT8
+----
+OID out of range: -2147483649
+
+eval
+-2147483649:::INT8 >= 1:::OID
+----
+OID out of range: -2147483649
diff --git a/pkg/sql/sem/tree/datum.go b/pkg/sql/sem/tree/datum.go
index b8838f3c1dd0..67cb8453e3ed 100644
--- a/pkg/sql/sem/tree/datum.go
+++ b/pkg/sql/sem/tree/datum.go
@@ -759,6 +759,9 @@ func (d *DInt) CompareError(ctx CompareContext, other Datum) (int, error) {
 		// OIDs are always unsigned 32-bit integers. Some languages, like Java,
 		// compare OIDs to signed 32-bit integers, so we implement the comparison
 		// by converting to a uint32 first. This matches Postgres behavior.
+		if thisInt > math.MaxUint32 || thisInt < math.MinInt32 {
+			return 0, pgerror.Newf(pgcode.NumericValueOutOfRange, "OID out of range: %d", thisInt)
+		}
 		thisInt = DInt(uint32(thisInt))
 		v = DInt(t.Oid)
 	default:
@@ -4963,6 +4966,9 @@ func (d *DOid) CompareError(ctx CompareContext, other Datum) (int, error) {
 		// OIDs are always unsigned 32-bit integers. Some languages, like Java,
 		// compare OIDs to signed 32-bit integers, so we implement the comparison
 		// by converting to a uint32 first. This matches Postgres behavior.
+		if *t > math.MaxUint32 || *t < math.MinInt32 {
+			return 0, pgerror.Newf(pgcode.NumericValueOutOfRange, "OID out of range: %d", *t)
+		}
 		v = oid.Oid(*t)
 	default:
 		return 0, makeUnsupportedComparisonMessage(d, other)