roachtest: avoid access to globals in clusterSpec
`clusterSpec` is part of the `Cluster` interface and we want to extract
it into a subpackage. Since it was using the `cloud` flag, this was
awkward. Avoid use of this singleton and plumb things in properly from
the top. This is a very large diff, but it's almost all mechanical.

We also avoid referencing `local`, a similar flag. It is now folded
into `s.Cloud==spec.Local`, at least in the context of `clusterSpec`;
phasing out the `var local` entirely is left as a TODO.

Release note: None
tbg committed Jun 15, 2021
1 parent 4e16720 commit 7dc3736
Showing 88 changed files with 268 additions and 224 deletions.
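
Before the file listing, here is a minimal, self-contained Go sketch of the pattern the commit applies: the registry captures the cloud once at the top level (e.g. from the `--cloud` flag) and threads it into every spec it creates, instead of `clusterSpec` reading package-level `cloud`/`local` globals. Only the names `testRegistry`, `clusterSpec`, `makeClusterSpec`, and the `spec.GCE`/`spec.Local` constants mirror the diff below; the field shapes, constant values, and constructor signature are illustrative assumptions, not the actual roachtest code.

package main

import "fmt"

// Cloud constants in the spirit of pkg/cmd/roachtest/spec (values assumed).
const (
	GCE   = "gce"
	AWS   = "aws"
	Azure = "azure"
	Local = "local"
)

// clusterSpec no longer consults package-level `cloud`/`local` flags; the
// cloud is a field set by whoever constructs the spec.
type clusterSpec struct {
	Cloud     string
	NodeCount int
}

// String folds the old `local` check into a comparison against Local,
// mirroring `s.Cloud == spec.Local` from the commit message.
func (s clusterSpec) String() string {
	if s.Cloud == Local {
		return fmt.Sprintf("local-%d", s.NodeCount)
	}
	return fmt.Sprintf("%s-n%d", s.Cloud, s.NodeCount)
}

// testRegistry holds the cloud chosen once at the top level and plumbs it
// into every spec it hands out, which is why call sites change from
// makeClusterSpec(n) to r.makeClusterSpec(n) throughout the diff.
type testRegistry struct {
	cloud string
}

func (r *testRegistry) makeClusterSpec(nodeCount int) clusterSpec {
	return clusterSpec{Cloud: r.cloud, NodeCount: nodeCount}
}

func main() {
	r := &testRegistry{cloud: GCE}
	fmt.Println(r.makeClusterSpec(4)) // prints "gce-n4"
}
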
3 changes: 3 additions & 0 deletions pkg/cmd/roachtest/BUILD.bazel
@@ -23,6 +23,7 @@ go_library(
"cluster.go",
"cluster_init.go",
"cluster_interface.go",
"cluster_spec.go",
"connection_latency.go",
"copy.go",
"decommission.go",
@@ -136,6 +137,7 @@ go_library(
"//pkg/cmd/internal/issues",
"//pkg/cmd/roachtest/logger",
"//pkg/cmd/roachtest/option",
"//pkg/cmd/roachtest/spec",
"//pkg/gossip",
"//pkg/internal/sqlsmith",
"//pkg/internal/team",
@@ -217,6 +219,7 @@ go_test(
deps = [
"//pkg/cmd/roachtest/logger",
"//pkg/cmd/roachtest/option",
"//pkg/cmd/roachtest/spec",
"//pkg/testutils",
"//pkg/testutils/skip",
"//pkg/util/syncutil",
2 changes: 1 addition & 1 deletion pkg/cmd/roachtest/acceptance.go
@@ -90,7 +90,7 @@ func registerAcceptance(r *testRegistry) {

spec := specTemplate
spec.Owner = owner
spec.Cluster = makeClusterSpec(numNodes)
spec.Cluster = r.makeClusterSpec(numNodes)
spec.Skip = tc.skip
spec.Name = specTemplate.Name + "/" + tc.name
spec.MinVersion = tc.minVersion
2 changes: 1 addition & 1 deletion pkg/cmd/roachtest/activerecord.go
@@ -239,7 +239,7 @@ func registerActiveRecord(r *testRegistry) {
MinVersion: "v20.2.0",
Name: "activerecord",
Owner: OwnerSQLExperience,
Cluster: makeClusterSpec(1),
Cluster: r.makeClusterSpec(1),
Tags: []string{`default`, `orm`},
Run: runActiveRecord,
})
6 changes: 3 additions & 3 deletions pkg/cmd/roachtest/allocator.go
@@ -74,15 +74,15 @@ func registerAllocator(r *testRegistry) {
r.Add(testSpec{
Name: `replicate/up/1to3`,
Owner: OwnerKV,
Cluster: makeClusterSpec(3),
Cluster: r.makeClusterSpec(3),
Run: func(ctx context.Context, t *test, c Cluster) {
runAllocator(ctx, t, c, 1, 10.0)
},
})
r.Add(testSpec{
Name: `replicate/rebalance/3to5`,
Owner: OwnerKV,
Cluster: makeClusterSpec(5),
Cluster: r.makeClusterSpec(5),
Run: func(ctx context.Context, t *test, c Cluster) {
runAllocator(ctx, t, c, 3, 42.0)
},
@@ -91,7 +91,7 @@ func registerAllocator(r *testRegistry) {
Name: `replicate/wide`,
Owner: OwnerKV,
Timeout: 10 * time.Minute,
Cluster: makeClusterSpec(9, cpu(1)),
Cluster: r.makeClusterSpec(9, cpu(1)),
MinVersion: "v19.2.0",
Run: runWideReplication,
})
6 changes: 3 additions & 3 deletions pkg/cmd/roachtest/alterpk.go
@@ -171,7 +171,7 @@ func registerAlterPK(r *testRegistry) {
// Use a 4 node cluster -- 3 nodes will run cockroach, and the last will be the
// workload driver node.
MinVersion: "v20.1.0",
Cluster: makeClusterSpec(4),
Cluster: r.makeClusterSpec(4),
Run: runAlterPKBank,
})
r.Add(testSpec{
@@ -180,7 +180,7 @@ func registerAlterPK(r *testRegistry) {
// Use a 4 node cluster -- 3 nodes will run cockroach, and the last will be the
// workload driver node.
MinVersion: "v20.1.0",
Cluster: makeClusterSpec(4, cpu(32)),
Cluster: r.makeClusterSpec(4, cpu(32)),
Run: func(ctx context.Context, t *test, c Cluster) {
runAlterPKTPCC(ctx, t, c, 250 /* warehouses */, true /* expensiveChecks */)
},
@@ -191,7 +191,7 @@ func registerAlterPK(r *testRegistry) {
// Use a 4 node cluster -- 3 nodes will run cockroach, and the last will be the
// workload driver node.
MinVersion: "v20.1.0",
Cluster: makeClusterSpec(4, cpu(16)),
Cluster: r.makeClusterSpec(4, cpu(16)),
Run: func(ctx context.Context, t *test, c Cluster) {
runAlterPKTPCC(ctx, t, c, 500 /* warehouses */, false /* expensiveChecks */)
},
2 changes: 1 addition & 1 deletion pkg/cmd/roachtest/autoupgrade.go
@@ -245,7 +245,7 @@ func registerAutoUpgrade(r *testRegistry) {
Name: `autoupgrade`,
Owner: OwnerKV,
MinVersion: "v19.1.0",
Cluster: makeClusterSpec(5),
Cluster: r.makeClusterSpec(5),
Run: func(ctx context.Context, t *test, c Cluster) {
pred, err := PredecessorVersion(r.buildVersion)
if err != nil {
11 changes: 6 additions & 5 deletions pkg/cmd/roachtest/backup.go
@@ -20,6 +20,7 @@ import (
"strings"
"time"

"github.com/cockroachdb/cockroach/pkg/cmd/roachtest/spec"
cloudstorage "github.com/cockroachdb/cockroach/pkg/storage/cloud"
"github.com/cockroachdb/cockroach/pkg/storage/cloud/amazon"
"github.com/cockroachdb/cockroach/pkg/util/log"
@@ -83,7 +84,7 @@ func importBankData(ctx context.Context, rows int, t *test, c Cluster) string {
func registerBackupNodeShutdown(r *testRegistry) {
// backupNodeRestartSpec runs a backup and randomly shuts down a node during
// the backup.
backupNodeRestartSpec := makeClusterSpec(4)
backupNodeRestartSpec := r.makeClusterSpec(4)
loadBackupData := func(ctx context.Context, t *test, c Cluster) string {
// This ought to be enough since this isn't a performance test.
rows := rows15GiB
@@ -177,7 +178,7 @@ func initBulkJobPerfArtifacts(ctx context.Context, testName string, timeout time

func registerBackup(r *testRegistry) {

backup2TBSpec := makeClusterSpec(10)
backup2TBSpec := r.makeClusterSpec(10)
r.Add(testSpec{
Name: fmt.Sprintf("backup/2TB/%s", backup2TBSpec),
Owner: OwnerBulkIO,
@@ -213,14 +214,14 @@ func registerBackup(r *testRegistry) {
},
})

KMSSpec := makeClusterSpec(3)
KMSSpec := r.makeClusterSpec(3)
r.Add(testSpec{
Name: fmt.Sprintf("backup/KMS/%s", KMSSpec.String()),
Owner: OwnerBulkIO,
Cluster: KMSSpec,
MinVersion: "v20.2.0",
Run: func(ctx context.Context, t *test, c Cluster) {
if cloud == gce {
if cloud == spec.GCE {
t.Skip("backupKMS roachtest is only configured to run on AWS", "")
}

@@ -336,7 +337,7 @@ func registerBackup(r *testRegistry) {
r.Add(testSpec{
Name: `backupTPCC`,
Owner: OwnerBulkIO,
Cluster: makeClusterSpec(3),
Cluster: r.makeClusterSpec(3),
Timeout: 1 * time.Hour,
Run: func(ctx context.Context, t *test, c Cluster) {
// Randomize starting with encryption-at-rest enabled.
4 changes: 2 additions & 2 deletions pkg/cmd/roachtest/cancel.go
@@ -130,7 +130,7 @@ func registerCancel(r *testRegistry) {
r.Add(testSpec{
Name: fmt.Sprintf("cancel/tpch/distsql/queries=%s,nodes=%d", queries, numNodes),
Owner: OwnerSQLQueries,
Cluster: makeClusterSpec(numNodes),
Cluster: r.makeClusterSpec(numNodes),
Run: func(ctx context.Context, t *test, c Cluster) {
runCancel(ctx, t, c, tpchQueriesToRun, true /* useDistsql */)
},
@@ -139,7 +139,7 @@ func registerCancel(r *testRegistry) {
r.Add(testSpec{
Name: fmt.Sprintf("cancel/tpch/local/queries=%s,nodes=%d", queries, numNodes),
Owner: OwnerSQLQueries,
Cluster: makeClusterSpec(numNodes),
Cluster: r.makeClusterSpec(numNodes),
Run: func(ctx context.Context, t *test, c Cluster) {
runCancel(ctx, t, c, tpchQueriesToRun, false /* useDistsql */)
},
20 changes: 10 additions & 10 deletions pkg/cmd/roachtest/cdc.go
@@ -607,7 +607,7 @@ func registerCDC(r *testRegistry) {
r.Add(testSpec{
Name: "cdc/tpcc-1000",
Owner: OwnerCDC,
Cluster: makeClusterSpec(4, cpu(16)),
Cluster: r.makeClusterSpec(4, cpu(16)),
RequiresLicense: true,
Run: func(ctx context.Context, t *test, c Cluster) {
cdcBasicTest(ctx, t, c, cdcTestArgs{
@@ -622,7 +622,7 @@ func registerCDC(r *testRegistry) {
r.Add(testSpec{
Name: "cdc/tpcc-1000/sink=null",
Owner: OwnerCDC,
Cluster: makeClusterSpec(4, cpu(16)),
Cluster: r.makeClusterSpec(4, cpu(16)),
Tags: []string{"manual"},
RequiresLicense: true,
Run: func(ctx context.Context, t *test, c Cluster) {
@@ -639,7 +639,7 @@ func registerCDC(r *testRegistry) {
r.Add(testSpec{
Name: "cdc/initial-scan",
Owner: OwnerCDC,
Cluster: makeClusterSpec(4, cpu(16)),
Cluster: r.makeClusterSpec(4, cpu(16)),
RequiresLicense: true,
Run: func(ctx context.Context, t *test, c Cluster) {
cdcBasicTest(ctx, t, c, cdcTestArgs{
@@ -655,7 +655,7 @@ func registerCDC(r *testRegistry) {
r.Add(testSpec{
Name: "cdc/sink-chaos",
Owner: `cdc`,
Cluster: makeClusterSpec(4, cpu(16)),
Cluster: r.makeClusterSpec(4, cpu(16)),
RequiresLicense: true,
Run: func(ctx context.Context, t *test, c Cluster) {
cdcBasicTest(ctx, t, c, cdcTestArgs{
@@ -671,7 +671,7 @@ func registerCDC(r *testRegistry) {
r.Add(testSpec{
Name: "cdc/crdb-chaos",
Owner: `cdc`,
Cluster: makeClusterSpec(4, cpu(16)),
Cluster: r.makeClusterSpec(4, cpu(16)),
RequiresLicense: true,
Run: func(ctx context.Context, t *test, c Cluster) {
cdcBasicTest(ctx, t, c, cdcTestArgs{
@@ -692,7 +692,7 @@ func registerCDC(r *testRegistry) {
// TODO(mrtracy): This workload is designed to be running on a 20CPU nodes,
// but this cannot be allocated without some sort of configuration outside
// of this test. Look into it.
Cluster: makeClusterSpec(4, cpu(16)),
Cluster: r.makeClusterSpec(4, cpu(16)),
RequiresLicense: true,
Run: func(ctx context.Context, t *test, c Cluster) {
cdcBasicTest(ctx, t, c, cdcTestArgs{
@@ -708,7 +708,7 @@ func registerCDC(r *testRegistry) {
r.Add(testSpec{
Name: "cdc/cloud-sink-gcs/rangefeed=true",
Owner: `cdc`,
Cluster: makeClusterSpec(4, cpu(16)),
Cluster: r.makeClusterSpec(4, cpu(16)),
RequiresLicense: true,
Run: func(ctx context.Context, t *test, c Cluster) {
cdcBasicTest(ctx, t, c, cdcTestArgs{
@@ -729,7 +729,7 @@ func registerCDC(r *testRegistry) {
r.Add(testSpec{
Name: "cdc/kafka-auth",
Owner: `cdc`,
Cluster: makeClusterSpec(1),
Cluster: r.makeClusterSpec(1),
RequiresLicense: true,
Run: func(ctx context.Context, t *test, c Cluster) {
runCDCKafkaAuth(ctx, t, c)
@@ -738,7 +738,7 @@ func registerCDC(r *testRegistry) {
r.Add(testSpec{
Name: "cdc/bank",
Owner: `cdc`,
Cluster: makeClusterSpec(4),
Cluster: r.makeClusterSpec(4),
RequiresLicense: true,
Run: func(ctx context.Context, t *test, c Cluster) {
runCDCBank(ctx, t, c)
@@ -747,7 +747,7 @@ func registerCDC(r *testRegistry) {
r.Add(testSpec{
Name: "cdc/schemareg",
Owner: `cdc`,
Cluster: makeClusterSpec(1),
Cluster: r.makeClusterSpec(1),
RequiresLicense: true,
Run: func(ctx context.Context, t *test, c Cluster) {
runCDCSchemaRegistry(ctx, t, c)
2 changes: 1 addition & 1 deletion pkg/cmd/roachtest/clearrange.go
@@ -30,7 +30,7 @@ func registerClearRange(r *testRegistry) {
// to <3:30h but it varies.
Timeout: 5*time.Hour + 90*time.Minute,
MinVersion: "v19.1.0",
Cluster: makeClusterSpec(10, cpu(16)),
Cluster: r.makeClusterSpec(10, cpu(16)),
Run: func(ctx context.Context, t *test, c Cluster) {
runClearRange(ctx, t, c, checks)
},
2 changes: 1 addition & 1 deletion pkg/cmd/roachtest/clock_jump_crash.go
@@ -133,7 +133,7 @@ func registerClockJumpTests(r *testRegistry) {
Owner: OwnerKV,
// These tests muck with NTP, therefore we don't want the cluster reused
// by others.
Cluster: makeClusterSpec(1, reuseTagged("offset-injector")),
Cluster: r.makeClusterSpec(1, reuseTagged("offset-injector")),
Run: func(ctx context.Context, t *test, c Cluster) {
runClockJump(ctx, t, c, tc)
},
2 changes: 1 addition & 1 deletion pkg/cmd/roachtest/clock_monotonic.go
@@ -133,7 +133,7 @@ func registerClockMonotonicTests(r *testRegistry) {
Owner: OwnerKV,
// These tests muck with NTP, therefore we don't want the cluster reused by
// others.
Cluster: makeClusterSpec(1, reuseTagged("offset-injector")),
Cluster: r.makeClusterSpec(1, reuseTagged("offset-injector")),
Run: func(ctx context.Context, t *test, c Cluster) {
runClockMonotonicity(ctx, t, c, tc)
},
25 changes: 13 additions & 12 deletions pkg/cmd/roachtest/cluster.go
@@ -37,6 +37,7 @@ import (
"github.com/armon/circbuf"
"github.com/cockroachdb/cockroach/pkg/cmd/roachtest/logger"
"github.com/cockroachdb/cockroach/pkg/cmd/roachtest/option"
"github.com/cockroachdb/cockroach/pkg/cmd/roachtest/spec"
"github.com/cockroachdb/cockroach/pkg/util/contextutil"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/quotapool"
@@ -48,17 +49,14 @@ import (
"golang.org/x/sync/errgroup"
)

const (
aws = "aws"
gce = "gce"
azure = "azure"
)

var (
local bool
// TODO(tbg): this is redundant with --cloud==local. Make the --local flag an
// alias for `--cloud=local` and remove this variable.
local bool

cockroach string
libraryFilePaths []string
cloud = gce
cloud = spec.GCE
encrypt encryptValue = "false"
instanceType string
localSSDArg bool
@@ -197,6 +195,9 @@ func initBinariesAndLibraries() {
if clusterName == "local" {
local = true
}
if local {
cloud = spec.Local
}

cockroachDefault := "cockroach"
if !local {
@@ -707,22 +708,22 @@ func azureMachineType(cpus int) string {

func machineTypeFlag(machineType string) string {
switch cloud {
case aws:
case spec.AWS:
if isSSD(machineType) {
return "--aws-machine-type-ssd"
}
return "--aws-machine-type"
case gce:
case spec.GCE:
return "--gce-machine-type"
case azure:
case spec.Azure:
return "--azure-machine-type"
default:
panic(fmt.Sprintf("unsupported cloud: %s\n", cloud))
}
}

func isSSD(machineType string) bool {
if cloud != aws {
if cloud != spec.AWS {
panic("can only differentiate SSDs based on machine type on AWS")
}
if !localSSDArg {