From 00311d91c2915d21c4ddcf3a05f5fd240931580c Mon Sep 17 00:00:00 2001
From: Rafi Shamim
Date: Sat, 7 Aug 2021 04:18:50 -0400
Subject: [PATCH 1/8] sql: fix COPY CSV so it handles multiple records at a
 time

This was broken because the Go csv reader reads the entire input all at
once, so the underlying buffer is consumed. This caused the loop that
reads each record to terminate early.

Release note (bug fix): Fixed the COPY CSV command so that it handles
multiple records separated by newline characters.
---
 pkg/sql/copy.go                     | 13 +++++++-
 pkg/sql/pgwire/testdata/pgtest/copy | 48 +++++++++++++++++++++++++++++
 2 files changed, 60 insertions(+), 1 deletion(-)

diff --git a/pkg/sql/copy.go b/pkg/sql/copy.go
index 531e92d7443e..9cea8f7f8b88 100644
--- a/pkg/sql/copy.go
+++ b/pkg/sql/copy.go
@@ -323,17 +323,25 @@ func (c *copyMachine) processCopyData(ctx context.Context, data string, final bo
 	}
 	c.buf.WriteString(data)
 	var readFn func(ctx context.Context, final bool) (brk bool, err error)
+	var checkLoopFn func() bool
 	switch c.format {
 	case tree.CopyFormatText:
 		readFn = c.readTextData
+		checkLoopFn = func() bool { return c.buf.Len() > 0 }
 	case tree.CopyFormatBinary:
 		readFn = c.readBinaryData
+		checkLoopFn = func() bool { return c.buf.Len() > 0 }
 	case tree.CopyFormatCSV:
 		readFn = c.readCSVData
+		// Never exit the loop from this check. Instead, it's up to the readCSVData
+		// function to break when it's done reading. This is because the csv.Reader
+		// consumes all of c.buf in one shot, so checking if c.buf is empty would
+		// cause us to exit the loop early.
+		checkLoopFn = func() bool { return true }
 	default:
 		panic("unknown copy format")
 	}
-	for c.buf.Len() > 0 {
+	for checkLoopFn() {
 		brk, err := readFn(ctx, final)
 		if err != nil {
 			return err
@@ -378,6 +386,9 @@ func (c *copyMachine) readCSVData(ctx context.Context, final bool) (brk bool, er
 	record, err := c.csvReader.Read()
 	// Look for end of data before checking for errors, since a field count
 	// error will still return record data.
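	// Editorial sketch (not part of the original patch): encoding/csv wraps
	// its input in a bufio.Reader, so the first call to Read() drains c.buf
	// even though only one record has been parsed. For example:
	//
	//   var buf bytes.Buffer
	//   buf.WriteString("1,one\n2,two\n3,three\n")
	//   r := csv.NewReader(&buf)
	//   rec, _ := r.Read() // rec is ["1" "one"], yet buf.Len() is already 0
	//
	// That is why readCSVData must detect completion via io.EOF (below)
	// rather than by checking whether the buffer is empty.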
+	if record == nil && err == io.EOF {
+		return true, nil
+	}
 	if len(record) == 1 && record[0] == endOfData && c.buf.Len() == 0 {
 		return true, nil
 	}
diff --git a/pkg/sql/pgwire/testdata/pgtest/copy b/pkg/sql/pgwire/testdata/pgtest/copy
index 30505bb4f4cc..1045d9ab20a1 100644
--- a/pkg/sql/pgwire/testdata/pgtest/copy
+++ b/pkg/sql/pgwire/testdata/pgtest/copy
@@ -262,3 +262,51 @@ ReadyForQuery
 {"Type":"CopyInResponse","ColumnFormatCodes":[0,0]}
 {"Type":"ErrorResponse","Code":"22P04"}
 {"Type":"ReadyForQuery","TxStatus":"I"}
+
+send
+Query {"String": "DELETE FROM t"}
+Query {"String": "COPY t FROM STDIN CSV"}
+CopyData {"Data": "1,one\n2,two\n3,three"}
+CopyDone
+Query {"String": "SELECT * FROM t ORDER BY i"}
+----
+
+until ignore=RowDescription
+ReadyForQuery
+ReadyForQuery
+ReadyForQuery
+----
+{"Type":"CommandComplete","CommandTag":"DELETE 3"}
+{"Type":"ReadyForQuery","TxStatus":"I"}
+{"Type":"CopyInResponse","ColumnFormatCodes":[0,0]}
+{"Type":"CommandComplete","CommandTag":"COPY 3"}
+{"Type":"ReadyForQuery","TxStatus":"I"}
+{"Type":"DataRow","Values":[{"text":"1"},{"text":"one"}]}
+{"Type":"DataRow","Values":[{"text":"2"},{"text":"two"}]}
+{"Type":"DataRow","Values":[{"text":"3"},{"text":"three"}]}
+{"Type":"CommandComplete","CommandTag":"SELECT 3"}
+{"Type":"ReadyForQuery","TxStatus":"I"}
+
+send
+Query {"String": "DELETE FROM t"}
+Query {"String": "COPY t FROM STDIN DELIMITER ',' NULL ''"}
+CopyData {"Data": "1,one\n2,two\n3,three"}
+CopyDone
+Query {"String": "SELECT * FROM t ORDER BY i"}
+----
+
+until ignore=RowDescription
+ReadyForQuery
+ReadyForQuery
+ReadyForQuery
+----
+{"Type":"CommandComplete","CommandTag":"DELETE 3"}
+{"Type":"ReadyForQuery","TxStatus":"I"}
+{"Type":"CopyInResponse","ColumnFormatCodes":[0,0]}
+{"Type":"CommandComplete","CommandTag":"COPY 3"}
+{"Type":"ReadyForQuery","TxStatus":"I"}
+{"Type":"DataRow","Values":[{"text":"1"},{"text":"one"}]}
+{"Type":"DataRow","Values":[{"text":"2"},{"text":"two"}]}
+{"Type":"DataRow","Values":[{"text":"3"},{"text":"three"}]}
+{"Type":"CommandComplete","CommandTag":"SELECT 3"}
+{"Type":"ReadyForQuery","TxStatus":"I"}

From 94c5609697c8975a1c1e48911548f6e29a707081 Mon Sep 17 00:00:00 2001
From: Rail Aliiev
Date: Fri, 6 Aug 2021 21:08:20 -0400
Subject: [PATCH 2/8] acceptance: explicitly specify binary path

Touches #59446

Previously, the acceptance test located the `cockroach` binary
implicitly. This is problematic under bazel, where the binary may live
in a different place. This patch explicitly passes the location of the
cockroach binary to the acceptance test.

Release note: None
---
 build/teamcity-acceptance.sh            |  3 +--
 pkg/acceptance/cluster/dockercluster.go | 11 +----------
 pkg/acceptance/run.sh                   |  2 +-
 3 files changed, 3 insertions(+), 13 deletions(-)

diff --git a/build/teamcity-acceptance.sh b/build/teamcity-acceptance.sh
index 6950bfcaeb63..c7bad84dc8f5 100755
--- a/build/teamcity-acceptance.sh
+++ b/build/teamcity-acceptance.sh
@@ -22,7 +22,6 @@ tc_start_block "Compile CockroachDB"
 # Buffer noisy output and only print it on failure.
 run pkg/acceptance/prepare.sh &> artifacts/acceptance-compile.log || (cat artifacts/acceptance-compile.log && false)
 rm artifacts/acceptance-compile.log
-run ln -s cockroach-linux-2.6.32-gnu-amd64 cockroach # For the tests that run without Docker.
tc_end_block "Compile CockroachDB" # We need to compile the test binary because we can't invoke it in builder.sh (recursive use of Docker, though @@ -44,5 +43,5 @@ tc_start_block "Run acceptance tests" run_json_test env TZ=America/New_York stdbuf -eL -oL go test \ -mod=vendor -json -timeout 30m -v \ -exec "../../build/teamcity-go-test-precompiled.sh ./pkg/acceptance/acceptance.test" ./pkg/acceptance \ - -l "$TMPDIR" + -l "$TMPDIR" -b "$PWD/cockroach-linux-2.6.32-gnu-amd64" tc_end_block "Run acceptance tests" diff --git a/pkg/acceptance/cluster/dockercluster.go b/pkg/acceptance/cluster/dockercluster.go index ad64b5589435..162eafd12d1e 100644 --- a/pkg/acceptance/cluster/dockercluster.go +++ b/pkg/acceptance/cluster/dockercluster.go @@ -17,7 +17,6 @@ import ( "encoding/json" "flag" "fmt" - "go/build" "io" "io/ioutil" "net" @@ -70,15 +69,7 @@ var waitOnStop = flag.Bool("w", false, "wait for the user to interrupt before te var maxRangeBytes = *zonepb.DefaultZoneConfig().RangeMaxBytes // CockroachBinary is the path to the host-side binary to use. -var CockroachBinary = flag.String("b", func() string { - rootPkg, err := build.Import("github.com/cockroachdb/cockroach", "", build.FindOnly) - if err != nil { - panic(err) - } - // NB: This is the binary produced by our linux-gnu build target. Changes - // to the Makefile must be reflected here. - return filepath.Join(rootPkg.Dir, "cockroach-linux-2.6.32-gnu-amd64") -}(), "the host-side binary to run") +var CockroachBinary = flag.String("b", "", "the host-side binary to run") func exists(path string) bool { if _, err := os.Stat(path); oserror.IsNotExist(err) { diff --git a/pkg/acceptance/run.sh b/pkg/acceptance/run.sh index 10a4f0fa616c..8377ca093b9d 100755 --- a/pkg/acceptance/run.sh +++ b/pkg/acceptance/run.sh @@ -12,4 +12,4 @@ export TMPDIR=$PWD/artifacts/acceptance # For the acceptance tests that run without Docker. make build -make test PKG=./pkg/acceptance TESTTIMEOUT="${TESTTIMEOUT-30m}" TAGS=acceptance TESTFLAGS="${TESTFLAGS--v} -l $TMPDIR" +make test PKG=./pkg/acceptance TESTTIMEOUT="${TESTTIMEOUT-30m}" TAGS=acceptance TESTFLAGS="${TESTFLAGS--v} -b $PWD/cockroach-linux-2.6.32-gnu-amd64 -l $TMPDIR" From b715f9f9668d66198e1fc02f193bb8adef1fe618 Mon Sep 17 00:00:00 2001 From: Jeff Date: Mon, 2 Aug 2021 14:19:18 -0400 Subject: [PATCH 3/8] sqlproxy: add a timeout when dialing tenant pods Dialing dead or non existent pods on GKS can black hole the network connection. This leaves the connection process stuck until the OS TCP timeout of 2 minutes. The SQL proxy and the tenant pod are colocated. 5 seconds should be more than enough to establish the TCP connection. Timeing out will cause the retry loop in proxy_handler.go to refetch the tenant IP address and retry the connection. Release note: None --- pkg/ccl/sqlproxyccl/backend_dialer.go | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/pkg/ccl/sqlproxyccl/backend_dialer.go b/pkg/ccl/sqlproxyccl/backend_dialer.go index 3a00ad3e80de..b0ce1c594c62 100644 --- a/pkg/ccl/sqlproxyccl/backend_dialer.go +++ b/pkg/ccl/sqlproxyccl/backend_dialer.go @@ -13,6 +13,7 @@ import ( "encoding/binary" "io" "net" + "time" "github.com/jackc/pgproto3/v2" ) @@ -20,10 +21,16 @@ import ( // BackendDial is an example backend dialer that does a TCP/IP connection // to a backend, SSL and forwards the start message. It is defined as a variable // so it can be redirected for testing. +// +// BackendDial uses a dial timeout of 5 seconds to mitigate network black +// holes. 
var BackendDial = func( msg *pgproto3.StartupMessage, outgoingAddress string, tlsConfig *tls.Config, ) (net.Conn, error) { - conn, err := net.Dial("tcp", outgoingAddress) + // TODO this behavior may need to change once multi-region multi-tenant + // clusters are supported. The fixed timeout may need to be replaced by an + // adaptive timeout or the timeout could be replaced by speculative retries. + conn, err := net.DialTimeout("tcp", outgoingAddress, time.Second*5) if err != nil { return nil, newErrorf( codeBackendDown, "unable to reach backend SQL server: %v", err, From 070a799ca8cd4ec785840fe2010ef75d68c79f38 Mon Sep 17 00:00:00 2001 From: Andrew Werner Date: Mon, 9 Aug 2021 14:00:45 -0400 Subject: [PATCH 4/8] sql: rework SHOW JOBS WHEN COMPLETE delegate The previous pattern was fragile because the filter could be re-arranged. Release note: None --- pkg/sql/delegate/show_jobs.go | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/pkg/sql/delegate/show_jobs.go b/pkg/sql/delegate/show_jobs.go index 1b6c7932f576..80f67bccd986 100644 --- a/pkg/sql/delegate/show_jobs.go +++ b/pkg/sql/delegate/show_jobs.go @@ -62,12 +62,21 @@ SHOW JOBS SELECT id FROM system.jobs WHERE created_by_type='%s' and created_by_i sqlStmt := fmt.Sprintf("%s %s %s", selectClause, whereClause, orderbyClause) if n.Block { sqlStmt = fmt.Sprintf( - `SELECT * FROM [%s] - WHERE - IF(finished IS NULL, - IF(pg_sleep(1), crdb_internal.force_retry('24h'), 0), - 0 - ) = 0`, sqlStmt) + ` + WITH jobs AS (SELECT * FROM [%s]), + sleep_and_restart_if_unfinished AS ( + SELECT IF(pg_sleep(1), crdb_internal.force_retry('24h'), 1) + = 0 AS timed_out + FROM (SELECT job_id FROM jobs WHERE finished IS NULL LIMIT 1) + ), + fail_if_slept_too_long AS ( + SELECT crdb_internal.force_error('55000', 'timed out waiting for jobs') + FROM sleep_and_restart_if_unfinished + WHERE timed_out + ) +SELECT * + FROM jobs + WHERE NOT EXISTS(SELECT * FROM fail_if_slept_too_long)`, sqlStmt) } return parse(sqlStmt) } From a7958e2e6e6aa133c537baf9abcf958ff2a00d09 Mon Sep 17 00:00:00 2001 From: Marcus Gartner Date: Mon, 9 Aug 2021 11:55:38 -0700 Subject: [PATCH 5/8] cloud: bump orchestrator version to 21.1.7 Release note: None --- cloud/kubernetes/bring-your-own-certs/client.yaml | 2 +- .../bring-your-own-certs/cockroachdb-statefulset.yaml | 2 +- cloud/kubernetes/client-secure.yaml | 2 +- cloud/kubernetes/cluster-init-secure.yaml | 2 +- cloud/kubernetes/cluster-init.yaml | 2 +- cloud/kubernetes/cockroachdb-statefulset-secure.yaml | 2 +- cloud/kubernetes/cockroachdb-statefulset.yaml | 2 +- cloud/kubernetes/multiregion/client-secure.yaml | 2 +- cloud/kubernetes/multiregion/cluster-init-secure.yaml | 2 +- .../kubernetes/multiregion/cockroachdb-statefulset-secure.yaml | 2 +- .../multiregion/eks/cockroachdb-statefulset-secure-eks.yaml | 2 +- .../kubernetes/performance/cockroachdb-daemonset-insecure.yaml | 2 +- cloud/kubernetes/performance/cockroachdb-daemonset-secure.yaml | 2 +- .../performance/cockroachdb-statefulset-insecure.yaml | 2 +- .../kubernetes/performance/cockroachdb-statefulset-secure.yaml | 2 +- cloud/kubernetes/v1.6/client-secure.yaml | 2 +- cloud/kubernetes/v1.6/cluster-init-secure.yaml | 2 +- cloud/kubernetes/v1.6/cluster-init.yaml | 2 +- cloud/kubernetes/v1.6/cockroachdb-statefulset-secure.yaml | 2 +- cloud/kubernetes/v1.6/cockroachdb-statefulset.yaml | 2 +- cloud/kubernetes/v1.7/client-secure.yaml | 2 +- cloud/kubernetes/v1.7/cluster-init-secure.yaml | 2 +- cloud/kubernetes/v1.7/cluster-init.yaml | 2 +- 
cloud/kubernetes/v1.7/cockroachdb-statefulset-secure.yaml | 2 +- cloud/kubernetes/v1.7/cockroachdb-statefulset.yaml | 2 +- 25 files changed, 25 insertions(+), 25 deletions(-) diff --git a/cloud/kubernetes/bring-your-own-certs/client.yaml b/cloud/kubernetes/bring-your-own-certs/client.yaml index f4dc4eb23345..aa4383aeeb0c 100644 --- a/cloud/kubernetes/bring-your-own-certs/client.yaml +++ b/cloud/kubernetes/bring-your-own-certs/client.yaml @@ -19,7 +19,7 @@ spec: serviceAccountName: cockroachdb containers: - name: cockroachdb-client - image: cockroachdb/cockroach:v21.1.6 + image: cockroachdb/cockroach:v21.1.7 # Keep a pod open indefinitely so kubectl exec can be used to get a shell to it # and run cockroach client commands, such as cockroach sql, cockroach node status, etc. command: diff --git a/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml b/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml index d0c0b8d6e493..4429432695d0 100644 --- a/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml +++ b/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml @@ -152,7 +152,7 @@ spec: topologyKey: kubernetes.io/hostname containers: - name: cockroachdb - image: cockroachdb/cockroach:v21.1.6 + image: cockroachdb/cockroach:v21.1.7 imagePullPolicy: IfNotPresent # TODO: Change these to appropriate values for the hardware that you're running. You can see # the resources that can be allocated on each of your Kubernetes nodes by running: diff --git a/cloud/kubernetes/client-secure.yaml b/cloud/kubernetes/client-secure.yaml index 76e89fcbe584..7c9d928efeeb 100644 --- a/cloud/kubernetes/client-secure.yaml +++ b/cloud/kubernetes/client-secure.yaml @@ -31,7 +31,7 @@ spec: mountPath: /cockroach-certs containers: - name: cockroachdb-client - image: cockroachdb/cockroach:v21.1.6 + image: cockroachdb/cockroach:v21.1.7 imagePullPolicy: IfNotPresent volumeMounts: - name: client-certs diff --git a/cloud/kubernetes/cluster-init-secure.yaml b/cloud/kubernetes/cluster-init-secure.yaml index 711438306c76..83ec6061b3ce 100644 --- a/cloud/kubernetes/cluster-init-secure.yaml +++ b/cloud/kubernetes/cluster-init-secure.yaml @@ -33,7 +33,7 @@ spec: mountPath: /cockroach-certs containers: - name: cluster-init - image: cockroachdb/cockroach:v21.1.6 + image: cockroachdb/cockroach:v21.1.7 imagePullPolicy: IfNotPresent volumeMounts: - name: client-certs diff --git a/cloud/kubernetes/cluster-init.yaml b/cloud/kubernetes/cluster-init.yaml index 1428e32457d0..95810e549153 100644 --- a/cloud/kubernetes/cluster-init.yaml +++ b/cloud/kubernetes/cluster-init.yaml @@ -9,7 +9,7 @@ spec: spec: containers: - name: cluster-init - image: cockroachdb/cockroach:v21.1.6 + image: cockroachdb/cockroach:v21.1.7 imagePullPolicy: IfNotPresent command: - "/cockroach/cockroach" diff --git a/cloud/kubernetes/cockroachdb-statefulset-secure.yaml b/cloud/kubernetes/cockroachdb-statefulset-secure.yaml index 22dc7bab9a40..4eec5644d36c 100644 --- a/cloud/kubernetes/cockroachdb-statefulset-secure.yaml +++ b/cloud/kubernetes/cockroachdb-statefulset-secure.yaml @@ -194,7 +194,7 @@ spec: topologyKey: kubernetes.io/hostname containers: - name: cockroachdb - image: cockroachdb/cockroach:v21.1.6 + image: cockroachdb/cockroach:v21.1.7 imagePullPolicy: IfNotPresent # TODO: Change these to appropriate values for the hardware that you're running. 
You can see # the resources that can be allocated on each of your Kubernetes nodes by running: diff --git a/cloud/kubernetes/cockroachdb-statefulset.yaml b/cloud/kubernetes/cockroachdb-statefulset.yaml index 277de94bbad4..8f8172cc8092 100644 --- a/cloud/kubernetes/cockroachdb-statefulset.yaml +++ b/cloud/kubernetes/cockroachdb-statefulset.yaml @@ -97,7 +97,7 @@ spec: topologyKey: kubernetes.io/hostname containers: - name: cockroachdb - image: cockroachdb/cockroach:v21.1.6 + image: cockroachdb/cockroach:v21.1.7 imagePullPolicy: IfNotPresent # TODO: Change these to appropriate values for the hardware that you're running. You can see # the resources that can be allocated on each of your Kubernetes nodes by running: diff --git a/cloud/kubernetes/multiregion/client-secure.yaml b/cloud/kubernetes/multiregion/client-secure.yaml index 55774211ffdd..542c47aab77c 100644 --- a/cloud/kubernetes/multiregion/client-secure.yaml +++ b/cloud/kubernetes/multiregion/client-secure.yaml @@ -8,7 +8,7 @@ spec: serviceAccountName: cockroachdb containers: - name: cockroachdb-client - image: cockroachdb/cockroach:v21.1.6 + image: cockroachdb/cockroach:v21.1.7 imagePullPolicy: IfNotPresent volumeMounts: - name: client-certs diff --git a/cloud/kubernetes/multiregion/cluster-init-secure.yaml b/cloud/kubernetes/multiregion/cluster-init-secure.yaml index ba006b5dd1db..b76e48ff7de5 100644 --- a/cloud/kubernetes/multiregion/cluster-init-secure.yaml +++ b/cloud/kubernetes/multiregion/cluster-init-secure.yaml @@ -10,7 +10,7 @@ spec: serviceAccountName: cockroachdb containers: - name: cluster-init - image: cockroachdb/cockroach:v21.1.6 + image: cockroachdb/cockroach:v21.1.7 imagePullPolicy: IfNotPresent volumeMounts: - name: client-certs diff --git a/cloud/kubernetes/multiregion/cockroachdb-statefulset-secure.yaml b/cloud/kubernetes/multiregion/cockroachdb-statefulset-secure.yaml index adaf2c110362..801b3dec9dd2 100644 --- a/cloud/kubernetes/multiregion/cockroachdb-statefulset-secure.yaml +++ b/cloud/kubernetes/multiregion/cockroachdb-statefulset-secure.yaml @@ -166,7 +166,7 @@ spec: topologyKey: kubernetes.io/hostname containers: - name: cockroachdb - image: cockroachdb/cockroach:v21.1.6 + image: cockroachdb/cockroach:v21.1.7 imagePullPolicy: IfNotPresent ports: - containerPort: 26257 diff --git a/cloud/kubernetes/multiregion/eks/cockroachdb-statefulset-secure-eks.yaml b/cloud/kubernetes/multiregion/eks/cockroachdb-statefulset-secure-eks.yaml index f80e82153637..4a4b1402e21d 100644 --- a/cloud/kubernetes/multiregion/eks/cockroachdb-statefulset-secure-eks.yaml +++ b/cloud/kubernetes/multiregion/eks/cockroachdb-statefulset-secure-eks.yaml @@ -184,7 +184,7 @@ spec: name: cockroach-env containers: - name: cockroachdb - image: cockroachdb/cockroach:v21.1.6 + image: cockroachdb/cockroach:v21.1.7 imagePullPolicy: IfNotPresent # TODO: Change these to appropriate values for the hardware that you're running. 
You can see # the resources that can be allocated on each of your Kubernetes nodes by running: diff --git a/cloud/kubernetes/performance/cockroachdb-daemonset-insecure.yaml b/cloud/kubernetes/performance/cockroachdb-daemonset-insecure.yaml index 451f83d53fba..135477adc22f 100644 --- a/cloud/kubernetes/performance/cockroachdb-daemonset-insecure.yaml +++ b/cloud/kubernetes/performance/cockroachdb-daemonset-insecure.yaml @@ -81,7 +81,7 @@ spec: hostNetwork: true containers: - name: cockroachdb - image: cockroachdb/cockroach:v21.1.6 + image: cockroachdb/cockroach:v21.1.7 imagePullPolicy: IfNotPresent # TODO: If you configured taints to give CockroachDB exclusive access to nodes, feel free # to remove the requests and limits sections. If you didn't, you'll need to change these to diff --git a/cloud/kubernetes/performance/cockroachdb-daemonset-secure.yaml b/cloud/kubernetes/performance/cockroachdb-daemonset-secure.yaml index 2b7f0ad8c369..e91bf3b65467 100644 --- a/cloud/kubernetes/performance/cockroachdb-daemonset-secure.yaml +++ b/cloud/kubernetes/performance/cockroachdb-daemonset-secure.yaml @@ -197,7 +197,7 @@ spec: topologyKey: kubernetes.io/hostname containers: - name: cockroachdb - image: cockroachdb/cockroach:v21.1.6 + image: cockroachdb/cockroach:v21.1.7 imagePullPolicy: IfNotPresent # TODO: If you configured taints to give CockroachDB exclusive access to nodes, feel free # to remove the requests and limits sections. If you didn't, you'll need to change these to diff --git a/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml b/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml index f51bc90431ee..7f2e70ee711a 100644 --- a/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml +++ b/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml @@ -140,7 +140,7 @@ spec: - name: cockroachdb # NOTE: Always use the most recent version of CockroachDB for the best # performance and reliability. - image: cockroachdb/cockroach:v21.1.6 + image: cockroachdb/cockroach:v21.1.7 imagePullPolicy: IfNotPresent # TODO: Change these to appropriate values for the hardware that you're running. You can see # the resources that can be allocated on each of your Kubernetes nodes by running: diff --git a/cloud/kubernetes/performance/cockroachdb-statefulset-secure.yaml b/cloud/kubernetes/performance/cockroachdb-statefulset-secure.yaml index 90e23395e57e..38ef4d3dcc35 100644 --- a/cloud/kubernetes/performance/cockroachdb-statefulset-secure.yaml +++ b/cloud/kubernetes/performance/cockroachdb-statefulset-secure.yaml @@ -231,7 +231,7 @@ spec: - name: cockroachdb # NOTE: Always use the most recent version of CockroachDB for the best # performance and reliability. - image: cockroachdb/cockroach:v21.1.6 + image: cockroachdb/cockroach:v21.1.7 imagePullPolicy: IfNotPresent # TODO: Change these to appropriate values for the hardware that you're running. 
You can see # the resources that can be allocated on each of your Kubernetes nodes by running: diff --git a/cloud/kubernetes/v1.6/client-secure.yaml b/cloud/kubernetes/v1.6/client-secure.yaml index 76e89fcbe584..7c9d928efeeb 100644 --- a/cloud/kubernetes/v1.6/client-secure.yaml +++ b/cloud/kubernetes/v1.6/client-secure.yaml @@ -31,7 +31,7 @@ spec: mountPath: /cockroach-certs containers: - name: cockroachdb-client - image: cockroachdb/cockroach:v21.1.6 + image: cockroachdb/cockroach:v21.1.7 imagePullPolicy: IfNotPresent volumeMounts: - name: client-certs diff --git a/cloud/kubernetes/v1.6/cluster-init-secure.yaml b/cloud/kubernetes/v1.6/cluster-init-secure.yaml index 711438306c76..83ec6061b3ce 100644 --- a/cloud/kubernetes/v1.6/cluster-init-secure.yaml +++ b/cloud/kubernetes/v1.6/cluster-init-secure.yaml @@ -33,7 +33,7 @@ spec: mountPath: /cockroach-certs containers: - name: cluster-init - image: cockroachdb/cockroach:v21.1.6 + image: cockroachdb/cockroach:v21.1.7 imagePullPolicy: IfNotPresent volumeMounts: - name: client-certs diff --git a/cloud/kubernetes/v1.6/cluster-init.yaml b/cloud/kubernetes/v1.6/cluster-init.yaml index 1428e32457d0..95810e549153 100644 --- a/cloud/kubernetes/v1.6/cluster-init.yaml +++ b/cloud/kubernetes/v1.6/cluster-init.yaml @@ -9,7 +9,7 @@ spec: spec: containers: - name: cluster-init - image: cockroachdb/cockroach:v21.1.6 + image: cockroachdb/cockroach:v21.1.7 imagePullPolicy: IfNotPresent command: - "/cockroach/cockroach" diff --git a/cloud/kubernetes/v1.6/cockroachdb-statefulset-secure.yaml b/cloud/kubernetes/v1.6/cockroachdb-statefulset-secure.yaml index 42a63a2d28c4..52a9dcd7720c 100644 --- a/cloud/kubernetes/v1.6/cockroachdb-statefulset-secure.yaml +++ b/cloud/kubernetes/v1.6/cockroachdb-statefulset-secure.yaml @@ -177,7 +177,7 @@ spec: topologyKey: kubernetes.io/hostname containers: - name: cockroachdb - image: cockroachdb/cockroach:v21.1.6 + image: cockroachdb/cockroach:v21.1.7 imagePullPolicy: IfNotPresent ports: - containerPort: 26257 diff --git a/cloud/kubernetes/v1.6/cockroachdb-statefulset.yaml b/cloud/kubernetes/v1.6/cockroachdb-statefulset.yaml index 77ad86c3b3c9..29be09638395 100644 --- a/cloud/kubernetes/v1.6/cockroachdb-statefulset.yaml +++ b/cloud/kubernetes/v1.6/cockroachdb-statefulset.yaml @@ -80,7 +80,7 @@ spec: topologyKey: kubernetes.io/hostname containers: - name: cockroachdb - image: cockroachdb/cockroach:v21.1.6 + image: cockroachdb/cockroach:v21.1.7 imagePullPolicy: IfNotPresent ports: - containerPort: 26257 diff --git a/cloud/kubernetes/v1.7/client-secure.yaml b/cloud/kubernetes/v1.7/client-secure.yaml index 76e89fcbe584..7c9d928efeeb 100644 --- a/cloud/kubernetes/v1.7/client-secure.yaml +++ b/cloud/kubernetes/v1.7/client-secure.yaml @@ -31,7 +31,7 @@ spec: mountPath: /cockroach-certs containers: - name: cockroachdb-client - image: cockroachdb/cockroach:v21.1.6 + image: cockroachdb/cockroach:v21.1.7 imagePullPolicy: IfNotPresent volumeMounts: - name: client-certs diff --git a/cloud/kubernetes/v1.7/cluster-init-secure.yaml b/cloud/kubernetes/v1.7/cluster-init-secure.yaml index 711438306c76..83ec6061b3ce 100644 --- a/cloud/kubernetes/v1.7/cluster-init-secure.yaml +++ b/cloud/kubernetes/v1.7/cluster-init-secure.yaml @@ -33,7 +33,7 @@ spec: mountPath: /cockroach-certs containers: - name: cluster-init - image: cockroachdb/cockroach:v21.1.6 + image: cockroachdb/cockroach:v21.1.7 imagePullPolicy: IfNotPresent volumeMounts: - name: client-certs diff --git a/cloud/kubernetes/v1.7/cluster-init.yaml b/cloud/kubernetes/v1.7/cluster-init.yaml index 
1428e32457d0..95810e549153 100644
--- a/cloud/kubernetes/v1.7/cluster-init.yaml
+++ b/cloud/kubernetes/v1.7/cluster-init.yaml
@@ -9,7 +9,7 @@ spec:
     spec:
       containers:
       - name: cluster-init
-        image: cockroachdb/cockroach:v21.1.6
+        image: cockroachdb/cockroach:v21.1.7
         imagePullPolicy: IfNotPresent
         command:
           - "/cockroach/cockroach"
diff --git a/cloud/kubernetes/v1.7/cockroachdb-statefulset-secure.yaml b/cloud/kubernetes/v1.7/cockroachdb-statefulset-secure.yaml
index 211ad604941a..c00db5022086 100644
--- a/cloud/kubernetes/v1.7/cockroachdb-statefulset-secure.yaml
+++ b/cloud/kubernetes/v1.7/cockroachdb-statefulset-secure.yaml
@@ -189,7 +189,7 @@ spec:
             topologyKey: kubernetes.io/hostname
       containers:
       - name: cockroachdb
-        image: cockroachdb/cockroach:v21.1.6
+        image: cockroachdb/cockroach:v21.1.7
         imagePullPolicy: IfNotPresent
         ports:
         - containerPort: 26257
diff --git a/cloud/kubernetes/v1.7/cockroachdb-statefulset.yaml b/cloud/kubernetes/v1.7/cockroachdb-statefulset.yaml
index c6577e9c1dfd..ccde057131f7 100644
--- a/cloud/kubernetes/v1.7/cockroachdb-statefulset.yaml
+++ b/cloud/kubernetes/v1.7/cockroachdb-statefulset.yaml
@@ -92,7 +92,7 @@ spec:
             topologyKey: kubernetes.io/hostname
       containers:
       - name: cockroachdb
-        image: cockroachdb/cockroach:v21.1.6
+        image: cockroachdb/cockroach:v21.1.7
         imagePullPolicy: IfNotPresent
         ports:
         - containerPort: 26257

From 8d5751de1a5a03df1638062540a97e1cc2ec47bf Mon Sep 17 00:00:00 2001
From: Yahor Yuzefovich
Date: Thu, 5 Aug 2021 20:00:33 -0700
Subject: [PATCH 6/8] sql: change physical planning heuristics a bit to prefer
 local execution

This commit changes two parts of the physical planner heuristics:
- we now say that the lookup join "can be distributed" rather than
  "should be distributed"
- for top K sort we also say that it "can be" rather than "should be"
  distributed.

I'm not certain whether both of these changes are always beneficial,
but here is some justification.

The change to the lookup join heuristic will make it so that the
distribution of the join reader stage is determined by other stages of
the physical plan in `distsql=auto` mode. Consider an example where the
input to the lookup join is a table reader that scans only a handful of
rows. Previously, because of the "should distribute" heuristic, such a
plan would be "distributed", meaning we would plan a single table
reader on the leaseholder for the relevant range (most likely a remote
node from the perspective of the gateway node for the query); this, in
turn, would force the planning of the join reader on the same node, and
of all subsequent stages - if any - too. Such a decision can create a
hotspot if that particular range is queried often (think of an
append-only access pattern where the latest data is accessed most
frequently). With this change, in such a scenario we get more even
compute utilization across the cluster because the flow will be fully
planned on the gateway (which is assumed to be chosen randomly by a
load balancer), and the lookup join will be performed from the gateway
(we'll still need to perform a remote read from the leaseholder of that
single range).

The change to the top K sort heuristic seems less controversial to me,
yet I don't have a good justification. My feeling is that usually the
value of K is small, so it's OK if we don't "force" ourselves to
distribute the sort operation if the physical plan otherwise isn't
calling for it.
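(Editorial sketch, for reference while reading the diff below: the three
recommendation values and how they combine. The compose rule shown here
is an assumption - strongest constraint wins - consistent with how the
hunks below use it; the real definitions live in
distsql_physical_planner.go.)

    // distRecommendation expresses how strongly a plan node wants
    // distributed execution under distsql=auto.
    type distRecommendation int

    const (
    	cannotDistribute distRecommendation = iota // some node forces local execution
    	canDistribute                              // distribution is possible, not preferred
    	shouldDistribute                           // distribution is likely beneficial
    )

    // compose combines two recommendations, letting the strongest
    // constraint win: any cannotDistribute makes the plan local-only;
    // otherwise any shouldDistribute wins; otherwise the plan merely
    // can be distributed.
    func (a distRecommendation) compose(b distRecommendation) distRecommendation {
    	if a == cannotDistribute || b == cannotDistribute {
    		return cannotDistribute
    	}
    	if a == shouldDistribute || b == shouldDistribute {
    		return shouldDistribute
    	}
    	return canDistribute
    }

Switching the lookup join and top K sort from shouldDistribute to
canDistribute therefore stops those nodes from forcing distribution on
their own; the rest of the plan decides.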
Overall, the choice of making changes to these two heuristics isn't
very principled; it is driven by a single query from one of our largest
customers which happened to hit the hotspot scenario described above.
In their case, they have an append-like workload that is constantly
updating a single range. Eventually that range is split automatically,
but both new ranges stay on the same node. The latest data is accessed
far more frequently than any other data in the table, yet according to
the KV heuristics the ranges aren't being reallocated because the scans
hitting the hot ranges aren't exceeding the threshold. What isn't
accounted for is the fact that other parts of the flow are far more
compute-intensive, so this change attempts to alleviate such a hot node
scenario.

Release note (sql change): Some queries with lookup joins and/or top K
sorts are now more likely to be executed in a "local" manner with the
`distsql=auto` session variable.
---
 .../testdata/logic_test/regional_by_row    |  8 +--
 pkg/sql/distsql_physical_planner.go        | 49 ++++++++++---------
 pkg/sql/exec_util.go                       |  2 +-
 .../execbuilder/testdata/distsql_auto_mode | 20 +++++++-
 4 files changed, 51 insertions(+), 28 deletions(-)

diff --git a/pkg/ccl/logictestccl/testdata/logic_test/regional_by_row b/pkg/ccl/logictestccl/testdata/logic_test/regional_by_row
index ea7cd3d16342..e7def48f5f74 100644
--- a/pkg/ccl/logictestccl/testdata/logic_test/regional_by_row
+++ b/pkg/ccl/logictestccl/testdata/logic_test/regional_by_row
@@ -926,7 +926,7 @@ SELECT message FROM [SHOW KV TRACE FOR SESSION] WITH ORDINALITY
   OR message LIKE 'Scan%'
 ORDER BY ordinality ASC
 ----
-Scan /Table/75/1/"@"/10/0{-/NULL}, /Table/75/1/"\x80"/10/0{-/NULL}, /Table/75/1/"\xc0"/10/0{-/NULL}
+Scan /Table/75/1/"@"/10/0, /Table/75/1/"\x80"/10/0, /Table/75/1/"\xc0"/10/0
 fetched: /child/primary/'ap-southeast-2'/10/c_p_id -> /10
 Scan /Table/74/1/"@"/10/0, /Table/74/1/"\x80"/10/0, /Table/74/1/"\xc0"/10/0
 fetched: /parent/primary/'ap-southeast-2'/10 -> NULL
@@ -956,7 +956,7 @@ SELECT message FROM [SHOW KV TRACE FOR SESSION] WITH ORDINALITY
   OR message LIKE 'Scan%'
 ORDER BY ordinality ASC
 ----
-Scan /Table/75/1/"@"/10/0{-/NULL}, /Table/75/1/"\x80"/10/0{-/NULL}, /Table/75/1/"\xc0"/10/0{-/NULL}
+Scan /Table/75/1/"@"/10/0, /Table/75/1/"\x80"/10/0, /Table/75/1/"\xc0"/10/0
 fetched: /child/primary/'ap-southeast-2'/10/c_p_id -> /10
 Scan /Table/74/1/"@"/10/0, /Table/74/1/"\x80"/10/0, /Table/74/1/"\xc0"/10/0
 fetched: /parent/primary/'ap-southeast-2'/10 -> NULL
@@ -987,7 +987,7 @@ SELECT message FROM [SHOW KV TRACE FOR SESSION] WITH ORDINALITY
   OR message LIKE 'Scan%'
 ORDER BY ordinality ASC
 ----
-Scan /Table/75/1/"@"/10/0{-/NULL}, /Table/75/1/"\x80"/10/0{-/NULL}, /Table/75/1/"\xc0"/10/0{-/NULL}
+Scan /Table/75/1/"@"/10/0, /Table/75/1/"\x80"/10/0, /Table/75/1/"\xc0"/10/0
 fetched: /child/primary/'ap-southeast-2'/10/c_p_id -> /10
 Scan /Table/74/1/"@"/10/0, /Table/74/1/"\x80"/10/0, /Table/74/1/"\xc0"/10/0
 fetched: /parent/primary/'ap-southeast-2'/10 -> NULL
@@ -1018,7 +1018,7 @@ SELECT message FROM [SHOW KV TRACE FOR SESSION] WITH ORDINALITY
   OR message LIKE 'Scan%'
 ORDER BY ordinality ASC
 ----
-Scan /Table/75/1/"@"/10/0{-/NULL}, /Table/75/1/"\x80"/10/0{-/NULL}, /Table/75/1/"\xc0"/10/0{-/NULL}
+Scan /Table/75/1/"@"/10/0, /Table/75/1/"\x80"/10/0, /Table/75/1/"\xc0"/10/0
 fetched: /child/primary/'ap-southeast-2'/10/c_p_id -> /10
 Scan /Table/74/1/"@"/10/0, /Table/74/1/"\x80"/10/0, /Table/74/1/"\xc0"/10/0
 fetched: /parent/primary/'ap-southeast-2'/10 -> NULL
diff --git a/pkg/sql/distsql_physical_planner.go
b/pkg/sql/distsql_physical_planner.go index f08895203817..d4492cddded7 100644 --- a/pkg/sql/distsql_physical_planner.go +++ b/pkg/sql/distsql_physical_planner.go @@ -364,23 +364,23 @@ func mustWrapValuesNode(planCtx *PlanningCtx, specifiedInQuery bool) bool { // The error doesn't indicate complete failure - it's instead the reason that // this plan couldn't be distributed. // TODO(radu): add tests for this. -func checkSupportForPlanNode(node planNode) (distRecommendation, error) { +func checkSupportForPlanNode(node planNode, outputNodeHasLimit bool) (distRecommendation, error) { switch n := node.(type) { // Keep these cases alphabetized, please! case *distinctNode: - return checkSupportForPlanNode(n.plan) + return checkSupportForPlanNode(n.plan, false /* outputNodeHasLimit */) case *exportNode: - return checkSupportForPlanNode(n.source) + return checkSupportForPlanNode(n.source, false /* outputNodeHasLimit */) case *filterNode: if err := checkExpr(n.filter); err != nil { return cannotDistribute, err } - return checkSupportForPlanNode(n.source.plan) + return checkSupportForPlanNode(n.source.plan, false /* outputNodeHasLimit */) case *groupNode: - rec, err := checkSupportForPlanNode(n.plan) + rec, err := checkSupportForPlanNode(n.plan, false /* outputNodeHasLimit */) if err != nil { return cannotDistribute, err } @@ -390,10 +390,10 @@ func checkSupportForPlanNode(node planNode) (distRecommendation, error) { case *indexJoinNode: // n.table doesn't have meaningful spans, but we need to check support (e.g. // for any filtering expression). - if _, err := checkSupportForPlanNode(n.table); err != nil { + if _, err := checkSupportForPlanNode(n.table, false /* outputNodeHasLimit */); err != nil { return cannotDistribute, err } - return checkSupportForPlanNode(n.input) + return checkSupportForPlanNode(n.input, false /* outputNodeHasLimit */) case *invertedFilterNode: return checkSupportForInvertedFilterNode(n) @@ -402,7 +402,7 @@ func checkSupportForPlanNode(node planNode) (distRecommendation, error) { if err := checkExpr(n.onExpr); err != nil { return cannotDistribute, err } - rec, err := checkSupportForPlanNode(n.input) + rec, err := checkSupportForPlanNode(n.input, false /* outputNodeHasLimit */) if err != nil { return cannotDistribute, err } @@ -412,11 +412,11 @@ func checkSupportForPlanNode(node planNode) (distRecommendation, error) { if err := checkExpr(n.pred.onCond); err != nil { return cannotDistribute, err } - recLeft, err := checkSupportForPlanNode(n.left.plan) + recLeft, err := checkSupportForPlanNode(n.left.plan, false /* outputNodeHasLimit */) if err != nil { return cannotDistribute, err } - recRight, err := checkSupportForPlanNode(n.right.plan) + recRight, err := checkSupportForPlanNode(n.right.plan, false /* outputNodeHasLimit */) if err != nil { return cannotDistribute, err } @@ -433,7 +433,7 @@ func checkSupportForPlanNode(node planNode) (distRecommendation, error) { // Note that we don't need to check whether we support distribution of // n.countExpr or n.offsetExpr because those expressions are evaluated // locally, during the physical planning. 
- return checkSupportForPlanNode(n.plan) + return checkSupportForPlanNode(n.plan, true /* outputNodeHasLimit */) case *lookupJoinNode: if n.table.lockingStrength != descpb.ScanLockingStrength_FOR_NONE { @@ -453,11 +453,11 @@ func checkSupportForPlanNode(node planNode) (distRecommendation, error) { if err := checkExpr(n.onCond); err != nil { return cannotDistribute, err } - rec, err := checkSupportForPlanNode(n.input) + rec, err := checkSupportForPlanNode(n.input, false /* outputNodeHasLimit */) if err != nil { return cannotDistribute, err } - return rec.compose(shouldDistribute), nil + return rec.compose(canDistribute), nil case *ordinalityNode: // WITH ORDINALITY never gets distributed so that the gateway node can @@ -465,7 +465,7 @@ func checkSupportForPlanNode(node planNode) (distRecommendation, error) { return cannotDistribute, nil case *projectSetNode: - return checkSupportForPlanNode(n.source) + return checkSupportForPlanNode(n.source, false /* outputNodeHasLimit */) case *renderNode: for _, e := range n.render { @@ -473,7 +473,7 @@ func checkSupportForPlanNode(node planNode) (distRecommendation, error) { return cannotDistribute, err } } - return checkSupportForPlanNode(n.source.plan) + return checkSupportForPlanNode(n.source.plan, outputNodeHasLimit) case *scanNode: if n.lockingStrength != descpb.ScanLockingStrength_FOR_NONE { @@ -502,23 +502,28 @@ func checkSupportForPlanNode(node planNode) (distRecommendation, error) { } case *sortNode: - rec, err := checkSupportForPlanNode(n.plan) + rec, err := checkSupportForPlanNode(n.plan, false /* outputNodeHasLimit */) if err != nil { return cannotDistribute, err } - // If we have to sort, distribute the query. - rec = rec.compose(shouldDistribute) + if outputNodeHasLimit { + // If we have a top K sort, we can distribute the query. + rec = rec.compose(canDistribute) + } else { + // If we have to sort, distribute the query. + rec = rec.compose(shouldDistribute) + } return rec, nil case *unaryNode: return canDistribute, nil case *unionNode: - recLeft, err := checkSupportForPlanNode(n.left) + recLeft, err := checkSupportForPlanNode(n.left, false /* outputNodeHasLimit */) if err != nil { return cannotDistribute, err } - recRight, err := checkSupportForPlanNode(n.right) + recRight, err := checkSupportForPlanNode(n.right, false /* outputNodeHasLimit */) if err != nil { return cannotDistribute, err } @@ -542,7 +547,7 @@ func checkSupportForPlanNode(node planNode) (distRecommendation, error) { return canDistribute, nil case *windowNode: - return checkSupportForPlanNode(n.plan) + return checkSupportForPlanNode(n.plan, false /* outputNodeHasLimit */) case *zeroNode: return canDistribute, nil @@ -565,7 +570,7 @@ func checkSupportForPlanNode(node planNode) (distRecommendation, error) { } func checkSupportForInvertedFilterNode(n *invertedFilterNode) (distRecommendation, error) { - rec, err := checkSupportForPlanNode(n.input) + rec, err := checkSupportForPlanNode(n.input, false /* outputNodeHasLimit */) if err != nil { return cannotDistribute, err } diff --git a/pkg/sql/exec_util.go b/pkg/sql/exec_util.go index ba3c2383a5e4..d1d716d6e6cd 100644 --- a/pkg/sql/exec_util.go +++ b/pkg/sql/exec_util.go @@ -1269,7 +1269,7 @@ func getPlanDistribution( return physicalplan.LocalPlan } - rec, err := checkSupportForPlanNode(plan.planNode) + rec, err := checkSupportForPlanNode(plan.planNode, false /* outputNodeHasLimit */) if err != nil { // Don't use distSQL for this request. 
log.VEventf(ctx, 1, "query not supported for distSQL: %s", err) diff --git a/pkg/sql/opt/exec/execbuilder/testdata/distsql_auto_mode b/pkg/sql/opt/exec/execbuilder/testdata/distsql_auto_mode index b77f3cc1fbb7..9023375292d3 100644 --- a/pkg/sql/opt/exec/execbuilder/testdata/distsql_auto_mode +++ b/pkg/sql/opt/exec/execbuilder/testdata/distsql_auto_mode @@ -68,10 +68,16 @@ SELECT info FROM [EXPLAIN SELECT * FROM kv UNION SELECT * FROM kv LIMIT 1] WHERE ---- distribution: full -# Limit after sort - distribute. +# Limit after sort (i.e. top K sort) - don't distribute. query T SELECT info FROM [EXPLAIN SELECT * FROM kv WHERE k>1 ORDER BY v LIMIT 1] WHERE info LIKE 'distribution%' ---- +distribution: local + +# General sort - distribute. +query T +SELECT info FROM [EXPLAIN SELECT * FROM kv WHERE k>1 ORDER BY v] WHERE info LIKE 'distribution%' +---- distribution: full # Limit after aggregation - distribute. @@ -115,3 +121,15 @@ query T SELECT info FROM [EXPLAIN SELECT * FROM abc WHERE b=1 AND a%2=0] WHERE info LIKE 'distribution%' ---- distribution: local + +# Lookup join - don't distribute. +query T +SELECT info FROM [EXPLAIN SELECT a FROM abc INNER LOOKUP JOIN kv ON b = k WHERE k < 10] WHERE info LIKE 'distribution%' +---- +distribution: local + +# Lookup join on top of the full scan - distribute. +query T +SELECT info FROM [EXPLAIN SELECT a FROM abc INNER LOOKUP JOIN kv ON b = k] WHERE info LIKE 'distribution%' +---- +distribution: full From 64666e8996f39e9e93d0cb9079b1eb4f3eb0e73b Mon Sep 17 00:00:00 2001 From: Joel Kenny Date: Thu, 5 Aug 2021 12:13:25 -0400 Subject: [PATCH 7/8] release: update cockroach-cloud-images repository location This commit updates the location of the cockroach-cloud-images Docker repository. For reasons described in https://cockroachlabs.atlassian.net/browse/CC-4667, we are switching our internal CockroachDB Docker repository to this location, which uses Google Artifact Registry. 
Release note: None --- build/release/teamcity-make-and-publish-build.sh | 10 +++++----- build/release/teamcity-mark-build.sh | 4 ++-- build/release/teamcity-publish-release.sh | 10 +++++----- 3 files changed, 12 insertions(+), 12 deletions(-) diff --git a/build/release/teamcity-make-and-publish-build.sh b/build/release/teamcity-make-and-publish-build.sh index 5323f63b5a20..f4d24f5bea49 100755 --- a/build/release/teamcity-make-and-publish-build.sh +++ b/build/release/teamcity-make-and-publish-build.sh @@ -16,18 +16,18 @@ is_custom_build="$(echo "$TC_BUILD_BRANCH" | grep -Eo "^custombuild-" || echo "" if [[ -z "${DRY_RUN}" ]] ; then bucket="${BUCKET-cockroach-builds}" - google_credentials=$GOOGLE_COCKROACH_CLOUD_IMAGES_CREDENTIALS - gcr_repository="us.gcr.io/cockroach-cloud-images/cockroach" + google_credentials=$GOOGLE_COCKROACH_CLOUD_IMAGES_COCKROACHDB_CREDENTIALS + gcr_repository="us-docker.pkg.dev/cockroach-cloud-images/cockroachdb/cockroach" + # Used for docker login for gcloud + gcr_hostname="us-docker.pkg.dev" else bucket="${BUCKET:-cockroach-builds-test}" google_credentials="$GOOGLE_COCKROACH_RELEASE_CREDENTIALS" gcr_repository="us.gcr.io/cockroach-release/cockroach-test" build_name="${build_name}.dryrun" + gcr_hostname="us.gcr.io" fi -# Used for docker login for gcloud -gcr_hostname="us.gcr.io" - cat << EOF build_name: $build_name diff --git a/build/release/teamcity-mark-build.sh b/build/release/teamcity-mark-build.sh index c3fb1170de13..25acb1da858e 100755 --- a/build/release/teamcity-mark-build.sh +++ b/build/release/teamcity-mark-build.sh @@ -13,8 +13,8 @@ mark_build() { release_branch="$(echo "$TC_BUILD_BRANCH" | grep -Eo "^v[0-9]+\.[0-9]+" || echo"")" if [[ -z "${DRY_RUN}" ]] ; then - google_credentials=$GOOGLE_COCKROACH_CLOUD_IMAGES_CREDENTIALS - gcr_repository="us.gcr.io/cockroach-cloud-images/cockroach" + google_credentials=$GOOGLE_COCKROACH_CLOUD_IMAGES_COCKROACHDB_CREDENTIALS + gcr_repository="us-docker.pkg.dev/cockroach-cloud-images/cockroachdb/cockroach" else google_credentials=$GOOGLE_COCKROACH_RELEASE_CREDENTIALS gcr_repository="us.gcr.io/cockroach-release/cockroach-test" diff --git a/build/release/teamcity-publish-release.sh b/build/release/teamcity-publish-release.sh index e2f299770ff2..d0f77541003e 100755 --- a/build/release/teamcity-publish-release.sh +++ b/build/release/teamcity-publish-release.sh @@ -24,13 +24,15 @@ release_branch=$(echo ${build_name} | grep -E -o '^v[0-9]+\.[0-9]+') if [[ -z "${DRY_RUN}" ]] ; then bucket="${BUCKET:-binaries.cockroachdb.com}" - google_credentials="$GOOGLE_COCKROACH_CLOUD_IMAGES_CREDENTIALS" + google_credentials="$GOOGLE_COCKROACH_CLOUD_IMAGES_COCKROACHDB_CREDENTIALS" if [[ -z "${PRE_RELEASE}" ]] ; then dockerhub_repository="docker.io/cockroachdb/cockroach" else dockerhub_repository="docker.io/cockroachdb/cockroach-unstable" fi - gcr_repository="us.gcr.io/cockroach-cloud-images/cockroach" + gcr_repository="us-docker.pkg.dev/cockroach-cloud-images/cockroachdb/cockroach" + # Used for docker login for gcloud + gcr_hostname="us-docker.pkg.dev" s3_download_hostname="${bucket}" git_repo_for_tag="cockroachdb/cockroach" else @@ -38,6 +40,7 @@ else google_credentials="$GOOGLE_COCKROACH_RELEASE_CREDENTIALS" dockerhub_repository="docker.io/cockroachdb/cockroach-misc" gcr_repository="us.gcr.io/cockroach-release/cockroach-test" + gcr_hostname="us.gcr.io" s3_download_hostname="${bucket}.s3.amazonaws.com" git_repo_for_tag="cockroachlabs/release-staging" if [[ -z "$(echo ${build_name} | grep -E -o '^v[0-9]+\.[0-9]+\.[0-9]+$')" ]] ; then @@ -52,9 
+55,6 @@ else fi fi -# Used for docker login for gcloud -gcr_hostname="us.gcr.io" - tc_end_block "Variable Setup" From 89bb488254ce8bdfeb89b9e84afc14e6d9ff066b Mon Sep 17 00:00:00 2001 From: Marcus Gartner Date: Mon, 9 Aug 2021 13:41:15 -0700 Subject: [PATCH 8/8] roachtest: bump predecessor to v21.1.7 Release note: None --- pkg/cmd/roachtest/tests/predecessor_version.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/cmd/roachtest/tests/predecessor_version.go b/pkg/cmd/roachtest/tests/predecessor_version.go index 0ae0340f4cb0..5da84d9abc94 100644 --- a/pkg/cmd/roachtest/tests/predecessor_version.go +++ b/pkg/cmd/roachtest/tests/predecessor_version.go @@ -34,7 +34,7 @@ func PredecessorVersion(buildVersion version.Version) (string, error) { // (see runVersionUpgrade). The same is true for adding a new key to this // map. verMap := map[string]string{ - "21.2": "21.1.6", + "21.2": "21.1.7", "21.1": "20.2.12", "20.2": "20.1.16", "20.1": "19.2.11",