112470: upgrades: make stmt diag upgrade idempotent r=yuzefovich a=yuzefovich

All upgrades are expected to be idempotent, but the stmt diag upgrade (needed for plan-gist batched matching) wasn't: we needed to add an `IF EXISTS` clause to the `DROP INDEX` stmt (which doesn't have a meaningful `schemaExistsFn`). Additionally, we can combine the two stmts that each add a single column into one stmt that adds both.
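For intuition, here is a minimal, self-contained sketch of the idempotency contract (hypothetical types and names, loosely modeled on the `operation` struct visible in the diff below, not CockroachDB's actual upgrade API): a step is skipped when a `schemaExistsFn`-style check says it already ran, and a step without a meaningful check must use a self-guarding statement such as `DROP INDEX IF EXISTS`, so that running the whole upgrade twice succeeds both times.

```go
package main

import (
	"fmt"
	"strings"
)

// operation is a hypothetical, simplified stand-in for an upgrade step:
// query runs unless schemaExists reports the change is already in place.
type operation struct {
	name         string
	query        string
	schemaExists func() (bool, error) // nil when the query guards itself (e.g. IF EXISTS)
}

// runUpgrade applies every operation, skipping any whose schema change
// already exists. Re-running it must be a no-op that still succeeds.
func runUpgrade(ops []operation, exec func(string) error) error {
	for _, op := range ops {
		if op.schemaExists != nil {
			done, err := op.schemaExists()
			if err != nil {
				return err
			}
			if done {
				continue // applied on a previous attempt
			}
		}
		if err := exec(op.query); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	colsAdded := false
	ops := []operation{
		{
			name:         "add-columns",
			query:        `ALTER TABLE t ADD COLUMN a INT, ADD COLUMN b INT`,
			schemaExists: func() (bool, error) { return colsAdded, nil },
		},
		{
			// No meaningful existence check here, so the statement itself
			// must tolerate re-execution via IF EXISTS.
			name:  "drop-old-index",
			query: `DROP INDEX IF EXISTS t@old_idx`,
		},
	}
	exec := func(q string) error {
		if strings.HasPrefix(q, "ALTER") {
			colsAdded = true
		}
		fmt.Println("executed:", q)
		return nil
	}
	// The upgrade must succeed when run twice in a row.
	for i := 0; i < 2; i++ {
		if err := runUpgrade(ops, exec); err != nil {
			panic(err)
		}
	}
}
```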

Epic: None

Release note: None

112496: roachtest: use tpch workload in import-cancellation r=yuzefovich a=yuzefovich

Previously, we were using `querybench` to run the TPCH queries after the import succeeded. The comments around the code suggest that we wanted to assert that correct results were obtained, i.e. that there was no data corruption during the cancelled imports. However, `querybench` doesn't do any kind of verification, so this commit switches to the `tpch` workload with `--enable-checks=true`, which performs the desired verification.
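As a sketch of why the switch matters (hypothetical helpers and values, not the `tpch` workload's actual internals): checks-enabled verification amounts to comparing each query's live result against a recorded known-good result, so corruption from a cancelled import surfaces as a hard failure instead of a clean-looking run.

```go
package main

import (
	"fmt"
	"reflect"
)

// expectedRows maps a query name to its known-good result, the moral
// equivalent of the hard-coded expected outputs a checking workload carries.
var expectedRows = map[string][][]string{
	"q1": {{"A", "37734107"}, {"N", "74476040"}},
}

// runQuery stands in for executing the query against the cluster.
func runQuery(name string) [][]string {
	return [][]string{{"A", "37734107"}, {"N", "74476040"}}
}

// checkQuery fails loudly when the live result diverges from the golden
// one, which is how corrupted data would be detected after the imports.
func checkQuery(name string) error {
	got := runQuery(name)
	want, ok := expectedRows[name]
	if !ok {
		return fmt.Errorf("no expected output recorded for %s", name)
	}
	if !reflect.DeepEqual(got, want) {
		return fmt.Errorf("%s: result mismatch: got %v, want %v", name, got, want)
	}
	return nil
}

func main() {
	if err := checkQuery("q1"); err != nil {
		panic(err)
	}
	fmt.Println("q1 verified")
}
```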

Noticed this when looking at #111985.

Epic: None

Release note: None

112532: upgrade: Increase timeout for TestTenantAutoUpgrade under stress r=stevendanna a=ajstorm

The test times out under stress. With the updated timeout it now passes on a local repro.
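For context, a minimal sketch of the general pattern (hypothetical helpers, not the test's actual harness): the deadline consumed by a poll-until-success assertion is scaled up when the build is instrumented or under stress, since everything simply runs slower.

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// stress and raceEnabled stand in for build/environment detection such as
// skip.Stress() and util.RaceEnabled in the diff below.
var stress, raceEnabled = true, false

// succeedsSoon polls cond until it succeeds or the deadline expires.
func succeedsSoon(timeout time.Duration, cond func() error) error {
	deadline := time.Now().Add(timeout)
	for {
		err := cond()
		if err == nil {
			return nil
		}
		if time.Now().After(deadline) {
			return fmt.Errorf("condition never met: %w", err)
		}
		time.Sleep(100 * time.Millisecond)
	}
}

func main() {
	timeout := 20 * time.Second
	if raceEnabled || stress {
		// Instrumented or stressed builds run slower; give them headroom.
		timeout = 60 * time.Second
	}
	start := time.Now()
	err := succeedsSoon(timeout, func() error {
		if time.Since(start) > time.Second { // toy condition
			return nil
		}
		return errors.New("not yet")
	})
	fmt.Println("result:", err)
}
```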

Fixes: #112158

Release note: None

112543: streamingccl: unskip TestStreamingRegionalConstraint r=kvoli a=msbutler

This patch unskips TestStreamingRegionalConstraint under non-stress builds.

Fixes #111541

Release note: none

Co-authored-by: Yahor Yuzefovich <[email protected]>
Co-authored-by: Adam Storm <[email protected]>
Co-authored-by: Michael Butler <[email protected]>
4 people committed Oct 17, 2023
5 parents 46c9273 + ff192dd + e6238e6 + d59fa1c + 7ace7d5 commit 58957a2
Showing 4 changed files with 13 additions and 34 deletions.
@@ -233,7 +233,7 @@ func TestTenantAutoUpgrade(t *testing.T) {
 		UpgradeTo roachpb.Version
 	}
 	succeedsSoon := 20 * time.Second
-	if util.RaceEnabled {
+	if util.RaceEnabled || skip.Stress() {
 		succeedsSoon = 60 * time.Second
 	}
 	// Wait for auto upgrade status to be received by the testing knob.
@@ -1169,8 +1169,8 @@ func TestLoadProducerAndIngestionProgress(t *testing.T) {
 func TestStreamingRegionalConstraint(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	defer log.Scope(t).Close(t)
-	skip.WithIssue(t, 111541)
 	skip.UnderStressRace(t, "takes too long under stress race")
+	skip.UnderStress(t, "the allocator machinery struggles with cpu contention, which can cause the test to time out")
 
 	ctx := context.Background()
 	regions := []string{"mars", "venus", "mercury"}
pkg/cmd/roachtest/tests/import_cancellation.go (24 changes: 6 additions & 18 deletions)

@@ -25,6 +25,7 @@ import (
 	"github.com/cockroachdb/cockroach/pkg/cmd/roachtest/test"
 	"github.com/cockroachdb/cockroach/pkg/roachprod/install"
 	"github.com/cockroachdb/cockroach/pkg/util/randutil"
+	"github.com/cockroachdb/cockroach/pkg/workload/tpch"
 	"github.com/cockroachdb/errors"
 )
@@ -54,19 +55,6 @@ func runImportCancellation(ctx context.Context, t test.Test, c cluster.Cluster)
 	t.Status("starting csv servers")
 	c.Run(ctx, c.All(), `./cockroach workload csv-server --port=8081 &> logs/workload-csv-server.log < /dev/null &`)
 
-	// Download the tpch queries file. After the import, we'll run tpch queries
-	// against the imported tables.
-	const queriesFilename = "tpch"
-	const queriesURL = "https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/workload/querybench/tpch-queries"
-	t.Status(fmt.Sprintf("downloading %s query file from %s", queriesFilename, queriesURL))
-	if err := c.RunE(ctx, c.Node(1), fmt.Sprintf("curl %s > %s", queriesURL, queriesFilename)); err != nil {
-		t.Fatal(err)
-	}
-	numQueries, err := getNumQueriesInFile(queriesFilename, queriesURL)
-	if err != nil {
-		t.Fatal(err)
-	}
-
 	// Create the tables.
 	conn := c.Conn(ctx, t.L(), 1)
 	t.Status("creating SQL tables")
@@ -143,7 +131,7 @@ func runImportCancellation(ctx context.Context, t test.Test, c cluster.Cluster)
 	// that becomes GC'd.
 	for tbl := range tablesToNumFiles {
 		stmt := fmt.Sprintf(`ALTER TABLE csv.%s CONFIGURE ZONE USING gc.ttlseconds = $1`, tbl)
-		_, err = conn.ExecContext(ctx, stmt, 60*60*4 /* 4 hours */)
+		_, err := conn.ExecContext(ctx, stmt, 60*60*4 /* 4 hours */)
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -158,12 +146,12 @@ func runImportCancellation(ctx context.Context, t test.Test, c cluster.Cluster)
 	// were run 2 times.
 	const numRunsPerQuery = 2
 	const maxLatency = 500 * time.Second
-	maxOps := numRunsPerQuery * numQueries
+	maxOps := numRunsPerQuery * tpch.NumQueries
 	cmd := fmt.Sprintf(
-		"./workload run querybench --db=csv --concurrency=1 --query-file=%s "+
-			"--num-runs=%d --max-ops=%d {pgurl%s} "+
+		"./workload run tpch --db=csv --concurrency=1 --num-runs=%d "+
+			"--max-ops=%d {pgurl%s} --enable-checks=true "+
 			"--histograms="+t.PerfArtifactsDir()+"/stats.json --histograms-max-latency=%s",
-		queriesFilename, numRunsPerQuery, maxOps, c.All(), maxLatency.String())
+		numRunsPerQuery, maxOps, c.All(), maxLatency.String())
 	if err := c.RunE(ctx, c.Node(1), cmd); err != nil {
 		t.Fatal(err)
 	}
pkg/upgrade/upgrades/plan_gist_stmt_diagnostics_requests.go (19 changes: 5 additions & 14 deletions)

@@ -23,19 +23,16 @@ import (
 // Target schema changes in the system.statement_diagnostics_requests table,
 // adding two columns and updating the secondary index to store those columns.
 const (
-	addPlanGistColToStmtDiagReqs = `
-ALTER TABLE system.statement_diagnostics_requests
-ADD COLUMN plan_gist STRING NULL FAMILY "primary"`
-
-	addAntiPlanGistColToStmtDiagReqs = `
+	addPlanGistColsToStmtDiagReqs = `
 ALTER TABLE system.statement_diagnostics_requests
+ADD COLUMN plan_gist STRING NULL FAMILY "primary",
 ADD COLUMN anti_plan_gist BOOL NULL FAMILY "primary"`
 
 	createCompletedIdxV2 = `
 CREATE INDEX completed_idx_v2 ON system.statement_diagnostics_requests (completed, ID)
 STORING (statement_fingerprint, min_execution_latency, expires_at, sampling_probability, plan_gist, anti_plan_gist)`
 
-	dropCompletedIdx = `DROP INDEX system.statement_diagnostics_requests@completed_idx`
+	dropCompletedIdx = `DROP INDEX IF EXISTS system.statement_diagnostics_requests@completed_idx`
 )
 
 // stmtDiagForPlanGistMigration changes the schema of the
@@ -46,14 +43,8 @@ func stmtDiagForPlanGistMigration(
 	for _, op := range []operation{
 		{
 			name:           "add-stmt-diag-reqs-plan-gist-column",
-			schemaList:     []string{"plan_gist"},
-			query:          addPlanGistColToStmtDiagReqs,
-			schemaExistsFn: hasColumn,
-		},
-		{
-			name:           "add-stmt-diag-reqs-anti-plan-gist-column",
-			schemaList:     []string{"anti_plan_gist"},
-			query:          addAntiPlanGistColToStmtDiagReqs,
+			schemaList:     []string{"plan_gist", "anti_plan_gist"},
+			query:          addPlanGistColsToStmtDiagReqs,
 			schemaExistsFn: hasColumn,
 		},
 		{
