From e034a9daa0857cf864b60fae88e7276ef8d4496f Mon Sep 17 00:00:00 2001
From: Renato Costa
Date: Thu, 29 Feb 2024 11:03:56 -0500
Subject: [PATCH 1/3] roachtest: handle errors in `filepath.Walk`

Previously, the function passed to `filepath.Walk` ignored errors. This
is wrong: when there is an error, the `info` argument is nil, which
would cause roachtest to crash with a nil pointer dereference.

In this commit, we first check whether there was an error walking the
directory, and return it if so.

Epic: none

Release note: None
---
 pkg/cmd/roachtest/cluster.go | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/pkg/cmd/roachtest/cluster.go b/pkg/cmd/roachtest/cluster.go
index de8d618c8da8..550fa7c8f3d1 100644
--- a/pkg/cmd/roachtest/cluster.go
+++ b/pkg/cmd/roachtest/cluster.go
@@ -2164,6 +2164,9 @@ func (c *clusterImpl) RefetchCertsFromNode(ctx context.Context, node int) error
 	}
 	// Need to prevent world readable files or lib/pq will complain.
 	return filepath.Walk(c.localCertsDir, func(path string, info fs.FileInfo, err error) error {
+		if err != nil {
+			return errors.Wrap(err, "walking localCertsDir failed")
+		}
 		if info.IsDir() {
 			return nil
 		}

From dfe507de502f861b45e509c7204d874191b2c3e2 Mon Sep 17 00:00:00 2001
From: Renato Costa
Date: Thu, 29 Feb 2024 11:08:20 -0500
Subject: [PATCH 2/3] roachtest: remove duplication of tenant-certs directory

This introduces `CockroachNodeTenantCertsDir`, reducing duplication of
the directory where we keep tenant certificates; it serves a purpose
similar to `CockroachNodeCertsDir`, where all certificates are
ultimately kept.

We also add a suffix to the directory based on the virtual cluster ID,
avoiding clashes when several virtual clusters are created.

Epic: none

Release note: None
---
 pkg/roachprod/install/cluster_synced.go | 51 +++++++++++++++----------
 1 file changed, 31 insertions(+), 20 deletions(-)

diff --git a/pkg/roachprod/install/cluster_synced.go b/pkg/roachprod/install/cluster_synced.go
index d69e433b81b4..b008054c544c 100644
--- a/pkg/roachprod/install/cluster_synced.go
+++ b/pkg/roachprod/install/cluster_synced.go
@@ -554,10 +554,10 @@ func (c *SyncedCluster) Wipe(ctx context.Context, l *logger.Logger, preserveCert
 	var cmd string
 	if c.IsLocal() {
 		// Not all shells like brace expansion, so we'll do it here
-		dirs := []string{"data", "logs"}
+		dirs := []string{"data*", "logs*"}
 		if !preserveCerts {
 			dirs = append(dirs, fmt.Sprintf("%s*", CockroachNodeCertsDir))
-			dirs = append(dirs, "tenant-certs*")
+			dirs = append(dirs, fmt.Sprintf("%s*", CockroachNodeTenantCertsDir))
 		}
 		for _, dir := range dirs {
 			cmd += fmt.Sprintf(`rm -fr %s/%s ;`, c.localVMDir(node), dir)
@@ -566,10 +566,13 @@
 		rmCmds := []string{
 			`sudo find /mnt/data* -maxdepth 1 -type f -exec rm -f {} \;`,
 			`sudo rm -fr /mnt/data*/{auxiliary,local,tmp,cassandra,cockroach,cockroach-temp*,mongo-data}`,
-			`sudo rm -fr logs`,
+			`sudo rm -fr logs* data*`,
 		}
 		if !preserveCerts {
-			rmCmds = append(rmCmds, fmt.Sprintf("sudo rm -fr %s*", CockroachNodeCertsDir), "sudo rm -fr tenant-certs*")
+			rmCmds = append(rmCmds,
+				fmt.Sprintf("sudo rm -fr %s*", CockroachNodeCertsDir),
+				fmt.Sprintf("sudo rm -fr %s*", CockroachNodeTenantCertsDir),
+			)
 		}
 
 		cmd = strings.Join(rmCmds, " && ")
@@ -1602,12 +1605,16 @@ install --mode 0600 "${tmp2}" ~/.ssh/authorized_keys
 const (
 	// CockroachNodeCertsDir is the certs directory that lives
 	// on the cockroach node itself.
-	CockroachNodeCertsDir = "certs"
-	certsTarName          = "certs.tar"
-	tenantCertsTarName    = "tenant-certs.tar"
-	tenantCertFile        = "client-tenant.%d.crt"
+	CockroachNodeCertsDir       = "certs"
+	CockroachNodeTenantCertsDir = "tenant-certs"
+	certsTarName                = "certs.tar"
+	tenantCertFile              = "client-tenant.%d.crt"
 )
 
+func tenantCertsTarName(virtualClusterID int) string {
+	return fmt.Sprintf("%s-%d.tar", CockroachNodeTenantCertsDir, virtualClusterID)
+}
+
 // DistributeCerts will generate and distribute certificates to all the nodes.
 func (c *SyncedCluster) DistributeCerts(ctx context.Context, l *logger.Logger) error {
 	if c.checkForCertificates(ctx, l) {
@@ -1679,11 +1686,14 @@
 		return err
 	}
 
-	if err := hostCluster.createTenantCertBundle(ctx, l, tenantCertsTarName, virtualClusterID, nodeNames); err != nil {
+	certsTar := tenantCertsTarName(virtualClusterID)
+	if err := hostCluster.createTenantCertBundle(
+		ctx, l, tenantCertsTarName(virtualClusterID), virtualClusterID, nodeNames,
+	); err != nil {
 		return err
 	}
 
-	tarfile, cleanup, err := hostCluster.getFileFromFirstNode(ctx, l, tenantCertsTarName)
+	tarfile, cleanup, err := hostCluster.getFileFromFirstNode(ctx, l, certsTar)
 	if err != nil {
 		return err
 	}
@@ -1712,24 +1722,25 @@
 		cmd += fmt.Sprintf(`cd %s ; `, c.localVMDir(1))
 	}
 	cmd += fmt.Sprintf(`
-CERT_DIR=tenant-certs/certs
-CA_KEY=%[1]s/ca.key
+CERT_DIR=%[1]s-%[5]d/certs
+CA_KEY=%[2]s/ca.key
 rm -fr $CERT_DIR
 mkdir -p $CERT_DIR
-cp %[1]s/ca.crt $CERT_DIR
+cp %[2]s/ca.crt $CERT_DIR
 SHARED_ARGS="--certs-dir=$CERT_DIR --ca-key=$CA_KEY"
-VERSION=$(%[2]s version --build-tag)
+VERSION=$(%[3]s version --build-tag)
 VERSION=${VERSION::3}
 TENANT_SCOPE_OPT=""
 if [[ $VERSION = v22 ]]; then
-	TENANT_SCOPE_OPT="--tenant-scope %[4]d"
+	TENANT_SCOPE_OPT="--tenant-scope %[5]d"
 fi
-%[2]s cert create-node %[3]s $SHARED_ARGS
-%[2]s cert create-tenant-client %[4]d %[3]s $SHARED_ARGS
-%[2]s cert create-client root $TENANT_SCOPE_OPT $SHARED_ARGS
-tar cvf %[5]s $CERT_DIR
+%[3]s cert create-node %[4]s $SHARED_ARGS
+%[3]s cert create-tenant-client %[5]d %[4]s $SHARED_ARGS
+%[3]s cert create-client root $TENANT_SCOPE_OPT $SHARED_ARGS
+tar cvf %[6]s $CERT_DIR
 `,
+		CockroachNodeTenantCertsDir,
 		CockroachNodeCertsDir,
 		cockroachNodeBinary(c, node),
 		strings.Join(nodeNames, " "),
@@ -1791,7 +1802,7 @@
 	if c.IsLocal() {
 		dir = c.localVMDir(1)
 	}
-	if !c.fileExistsOnFirstNode(ctx, l, filepath.Join(dir, tenantCertsTarName)) {
+	if !c.fileExistsOnFirstNode(ctx, l, filepath.Join(dir, tenantCertsTarName(virtualClusterID))) {
 		return false
 	}
 	return c.fileExistsOnFirstNode(ctx, l, filepath.Join(c.CertsDir(1), fmt.Sprintf(tenantCertFile, virtualClusterID)))

From 60e6b6ca38639e85dd78df1c1d21b432bb667dda Mon Sep 17 00:00:00 2001
From: Renato Costa
Date: Thu, 29 Feb 2024 11:10:13 -0500
Subject: [PATCH 3/3] roachtest: port admission-control/multitenant-fairness to new API

This ports the `admission-control/multitenant-fairness/*` set of tests
to the 'official' virtual clusters roachprod API.

Fixes: #117670.
Release note: None --- .../admission_control_multitenant_fairness.go | 251 +++++++++--------- pkg/cmd/roachtest/tests/multitenant_utils.go | 7 - 2 files changed, 119 insertions(+), 139 deletions(-) diff --git a/pkg/cmd/roachtest/tests/admission_control_multitenant_fairness.go b/pkg/cmd/roachtest/tests/admission_control_multitenant_fairness.go index 5a0f01235c9e..1ef0c65dc2df 100644 --- a/pkg/cmd/roachtest/tests/admission_control_multitenant_fairness.go +++ b/pkg/cmd/roachtest/tests/admission_control_multitenant_fairness.go @@ -12,20 +12,21 @@ package tests import ( "context" - gosql "database/sql" "fmt" "math" + "sort" "time" "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/cluster" "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/grafana" "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/option" "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/registry" + "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/roachtestutil" "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/test" "github.com/cockroachdb/cockroach/pkg/roachprod/install" "github.com/cockroachdb/cockroach/pkg/roachprod/prometheus" - "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" "github.com/stretchr/testify/require" + "golang.org/x/exp/maps" ) // This test sets up a single-node CRDB cluster on a 4vCPU machine, and 4 @@ -91,14 +92,13 @@ func registerMultiTenantFairness(r registry.Registry) { for _, s := range specs { s := s r.Add(registry.TestSpec{ - Name: fmt.Sprintf("admission-control/multitenant-fairness/%s", s.name), - Cluster: r.MakeClusterSpec(5), - Owner: registry.OwnerAdmissionControl, - Benchmark: true, - Leases: registry.MetamorphicLeases, - CompatibleClouds: registry.AllExceptAWS, - Suites: registry.Suites(registry.Weekly), - NonReleaseBlocker: false, + Name: fmt.Sprintf("admission-control/multitenant-fairness/%s", s.name), + Cluster: r.MakeClusterSpec(5), + Owner: registry.OwnerAdmissionControl, + Benchmark: true, + Leases: registry.MetamorphicLeases, + CompatibleClouds: registry.AllExceptAWS, + Suites: registry.Suites(registry.Weekly), Run: func(ctx context.Context, t test.Test, c cluster.Cluster) { runMultiTenantFairness(ctx, t, c, s) }, @@ -121,13 +121,7 @@ type multiTenantFairnessSpec struct { func runMultiTenantFairness( ctx context.Context, t test.Test, c cluster.Cluster, s multiTenantFairnessSpec, ) { - if c.Spec().NodeCount < 5 { - t.Fatalf("expected at least 5 nodes, found %d", c.Spec().NodeCount) - } - - numTenants := 4 - crdbNodeID := 1 - crdbNode := c.Node(crdbNodeID) + crdbNode := c.Node(1) if c.IsLocal() { s.duration = 30 * time.Second s.concurrency = func(i int) int { return 4 } @@ -142,7 +136,7 @@ func runMultiTenantFairness( } } - t.L().Printf("starting cockroach securely (<%s)", time.Minute) + t.L().Printf("starting cockroach (<%s)", time.Minute) c.Start(ctx, t.L(), option.DefaultStartOptsNoBackups(), install.MakeClusterSettings(), @@ -156,105 +150,78 @@ func runMultiTenantFairness( promCfg.WithCluster(crdbNode.InstallNodes()) promCfg.WithGrafanaDashboardJSON(grafana.MultiTenantFairnessGrafanaJSON) - setRateLimit := func(ctx context.Context, val int) { - db := c.Conn(ctx, t.L(), crdbNodeID) - defer db.Close() - - if _, err := db.ExecContext( - ctx, fmt.Sprintf("SET CLUSTER SETTING kv.tenant_rate_limiter.rate_limit = '%d'", val)); err != nil { - t.Fatalf("failed to set tenant rate limiter limit: %v", err) - } - } + systemConn := c.Conn(ctx, t.L(), crdbNode[0]) + defer systemConn.Close() - setRateLimit(ctx, 1_000_000) + const rateLimit = 1_000_000 - const ( - tenantBaseID = 11 - 
tenantBaseHTTPPort = 8081 - tenantBaseSQLPort = 26259 - ) - tenantHTTPPort := func(offset int) int { - if c.IsLocal() { - return tenantBaseHTTPPort + offset - } - return tenantBaseHTTPPort - } - tenantSQLPort := func(offset int) int { - if c.IsLocal() { - return tenantBaseSQLPort + offset - } - return tenantBaseSQLPort - } - tenantID := func(offset int) int { - return tenantBaseID + offset - } - setTenantResourceLimits := func(tenantID int) { - db := c.Conn(ctx, t.L(), crdbNodeID) - defer db.Close() - if _, err := db.ExecContext( - ctx, fmt.Sprintf( - "SELECT crdb_internal.update_tenant_resource_limits(%[1]d, 1000000000, 10000, 1000000, now(), 0)", tenantID)); err != nil { - t.Fatalf("failed to update tenant resource limits: %v", err) - } - } - tenantNodeID := func(idx int) int { - return idx + 2 + if _, err := systemConn.ExecContext( + ctx, fmt.Sprintf("SET CLUSTER SETTING kv.tenant_rate_limiter.rate_limit = '%d'", rateLimit), + ); err != nil { + t.Fatalf("failed to set tenant rate limiter limit: %v", err) } t.L().Printf("enabling child metrics (<%s)", 30*time.Second) - _, err := c.Conn(ctx, t.L(), crdbNodeID).Exec(`SET CLUSTER SETTING server.child_metrics.enabled = true`) + _, err := systemConn.ExecContext(ctx, `SET CLUSTER SETTING server.child_metrics.enabled = true`) require.NoError(t, err) - // Create the tenants. - t.L().Printf("initializing %d tenants (<%s)", numTenants, 5*time.Minute) - tenants := make([]*tenantNode, numTenants) - for i := 0; i < numTenants; i++ { - if !t.SkipInit() { - _, err := c.Conn(ctx, t.L(), 1).Exec(`SELECT crdb_internal.create_tenant($1::INT)`, tenantID(i)) - require.NoError(t, err) - } - - tenant := createTenantNode(ctx, t, c, - crdbNode, tenantID(i), tenantNodeID(i), tenantHTTPPort(i), tenantSQLPort(i)) - defer tenant.stop(ctx, t, c) + const sqlInstance = 0 + virtualClusters := map[string]option.NodeListOption{ + "app-fairness-n2": c.Node(2), + "app-fairness-n3": c.Node(3), + "app-fairness-n4": c.Node(4), + "app-fairness-n5": c.Node(5), + } - tenants[i] = tenant - tenant.start(ctx, t, c, "./cockroach") - setTenantResourceLimits(tenantID(i)) + virtualClusterNames := maps.Keys(virtualClusters) + sort.Strings(virtualClusterNames) - tenantNode := c.Node(tenantNodeID(i)) + t.L().Printf("initializing %d virtual clusters (<%s)", len(virtualClusters), 5*time.Minute) + for j, name := range virtualClusterNames { + node := virtualClusters[name] + c.StartServiceForVirtualCluster( + ctx, t.L(), node, + option.DefaultStartVirtualClusterOpts(name, sqlInstance), + install.MakeClusterSettings(), + ) - // Init kv on each tenant. 
- cmd := fmt.Sprintf("./cockroach workload init kv '%s'", tenant.secureURL()) - require.NoError(t, c.RunE(ctx, option.WithNodes(tenantNode), cmd)) + t.L().Printf("virtual cluster %q started on n%d", name, node[0]) + _, err := systemConn.ExecContext( + ctx, fmt.Sprintf("SELECT crdb_internal.update_tenant_resource_limits('%s', 1000000000, 10000, 1000000, now(), 0)", name), + ) + require.NoError(t, err) - promCfg.WithTenantPod(tenantNode.InstallNodes()[0], tenantID(i)) + promCfg.WithTenantPod(node.InstallNodes()[0], j+1) promCfg.WithScrapeConfigs( - prometheus.MakeWorkloadScrapeConfig(fmt.Sprintf("workload-tenant-%d", i), + prometheus.MakeWorkloadScrapeConfig(fmt.Sprintf("workload-tenant-%d", j+1), "/", makeWorkloadScrapeNodes( - tenantNode.InstallNodes()[0], + node.InstallNodes()[0], []workloadInstance{ { - nodes: c.Node(tenantNodeID(i)), + nodes: node, prometheusPort: 2112, }, })), ) + + initKV := fmt.Sprintf( + "%s workload init kv {pgurl:%d:%s:%d}", + test.DefaultCockroachPath, node[0], name, sqlInstance, + ) + + c.Run(ctx, option.WithNodes(node), initKV) } - t.Status(fmt.Sprintf("setting up prometheus/grafana (<%s)", 2*time.Minute)) + t.L().Printf("setting up prometheus/grafana (<%s)", 2*time.Minute) _, cleanupFunc := setupPrometheusForRoachtest(ctx, t, c, promCfg, nil) defer cleanupFunc() t.L().Printf("loading per-tenant data (<%s)", 10*time.Minute) - m1 := c.NewMonitor(ctx, crdbNode) - for i := 0; i < numTenants; i++ { - if t.SkipInit() { - continue - } - - i := i - pgurl := tenants[i].secureURL() + m1 := c.NewMonitor(ctx, c.All()) + for name, node := range virtualClusters { + pgurl := fmt.Sprintf("{pgurl:%d:%s:%d}", node[0], name, sqlInstance) + name := name + node := node m1.Go(func(ctx context.Context) error { // TODO(irfansharif): Occasionally we see SQL liveness errors of the // following form. See #78691, #97448. @@ -268,37 +235,56 @@ func runMultiTenantFairness( // session gets renewed shortly (within some jitter). We don't want // to --tolerate-errors here and below because we'd see total // throughput collapse. - cmd := fmt.Sprintf( - "./cockroach workload run kv '%s' --secure --min-block-bytes %d --max-block-bytes %d "+ - "--batch %d --max-ops %d --concurrency=25", - pgurl, s.blockSize, s.blockSize, s.batch, s.maxOps) - err := c.RunE(ctx, option.WithNodes(c.Node(tenantNodeID(i))), cmd) - t.L().Printf("loaded data for tenant %d", tenantID(i)) - return err + cmd := roachtestutil.NewCommand("%s workload run kv", test.DefaultCockroachPath). + Option("secure"). + Flag("min-block-bytes", s.blockSize). + Flag("max-block-bytes", s.blockSize). + Flag("batch", s.batch). + Flag("max-ops", s.maxOps). + Flag("concurrency", 25). 
+ Arg(pgurl) + + if err := c.RunE(ctx, option.WithNodes(node), cmd.String()); err != nil { + return err + } + + t.L().Printf("loaded data for virtual cluster %q", name) + return nil }) } m1.Wait() - if !t.SkipInit() { - t.L().Printf("loaded data for all tenants, sleeping (<%s)", 2*time.Minute) - time.Sleep(2 * time.Minute) - } + waitDur := 2 * time.Minute + t.L().Printf("loaded data for all tenants, sleeping (<%s)", waitDur) + time.Sleep(waitDur) - t.L().Printf("running per-tenant workloads (<%s)", s.duration+time.Minute) + t.L().Printf("running virtual cluster workloads (<%s)", s.duration+time.Minute) m2 := c.NewMonitor(ctx, crdbNode) - for i := 0; i < numTenants; i++ { - i := i - pgurl := tenants[i].secureURL() + var n int + for name, node := range virtualClusters { + pgurl := fmt.Sprintf("{pgurl:%d:%s:%d}", node[0], name, sqlInstance) + n++ + + name := name + node := node m2.Go(func(ctx context.Context) error { - cmd := fmt.Sprintf( - "./cockroach workload run kv '%s' --write-seq=%s --secure --min-block-bytes %d "+ - "--max-block-bytes %d --batch %d --duration=%s --read-percent=%d --concurrency=%d", - pgurl, fmt.Sprintf("R%d", s.maxOps*s.batch), s.blockSize, s.blockSize, s.batch, - s.duration, s.readPercent, s.concurrency(tenantNodeID(i)-1)) - - err := c.RunE(ctx, option.WithNodes(c.Node(tenantNodeID(i))), cmd) - t.L().Printf("ran workload for tenant %d", tenantID(i)) - return err + cmd := roachtestutil.NewCommand("%s workload run kv", test.DefaultCockroachPath). + Option("secure"). + Flag("write-seq", fmt.Sprintf("R%d", s.maxOps*s.batch)). + Flag("min-block-bytes", s.blockSize). + Flag("max-block-bytes", s.blockSize). + Flag("batch", s.batch). + Flag("duration", s.duration). + Flag("read-percent", s.readPercent). + Flag("concurrency", s.concurrency(n)). + Arg(pgurl) + + if err := c.RunE(ctx, option.WithNodes(node), cmd.String()); err != nil { + return err + } + + t.L().Printf("ran workload for virtual cluster %q", name) + return nil }) } m2.Wait() @@ -313,40 +299,41 @@ func runMultiTenantFairness( // TODO(irfansharif): Aren't these stats getting polluted by the data-load // step? 
t.L().Printf("computing workload statistics (%s)", 30*time.Second) - counts := make([]float64, numTenants) - meanLatencies := make([]float64, numTenants) - for i := 0; i < numTenants; i++ { - i := i - db, err := gosql.Open("postgres", tenants[i].pgURL) - if err != nil { - t.Fatal(err) - } - defer func() { _ = db.Close() }() + counts := make([]float64, len(virtualClusters)) + meanLatencies := make([]float64, len(virtualClusters)) + for j, name := range virtualClusterNames { + node := virtualClusters[name] + + vcdb := c.Conn(ctx, t.L(), node[0], option.TenantName(name), option.SQLInstance(sqlInstance)) + defer vcdb.Close() - tdb := sqlutils.MakeSQLRunner(db) - tdb.Exec(t, "USE kv") + _, err := vcdb.ExecContext(ctx, "USE kv") + require.NoError(t, err) - rows := tdb.Query(t, ` + rows, err := vcdb.QueryContext(ctx, ` SELECT sum((statistics -> 'statistics' -> 'cnt')::INT), avg((statistics -> 'statistics' -> 'runLat' -> 'mean')::FLOAT) FROM crdb_internal.statement_statistics WHERE metadata @> '{"db":"kv","failed":false}' AND metadata @> $1`, fmt.Sprintf(`{"querySummary": "%s"}`, s.query)) + require.NoError(t, err) if rows.Next() { var cnt, lat float64 err := rows.Scan(&cnt, &lat) require.NoError(t, err) - counts[i] = cnt - meanLatencies[i] = lat + counts[j] = cnt + meanLatencies[j] = lat } else { t.Fatal("no query results") } + + require.NoError(t, rows.Err()) } failThreshold := .3 - throughput := make([]float64, numTenants) + throughput := make([]float64, len(virtualClusters)) ok, maxThroughputDelta := floatsWithinPercentage(counts, failThreshold) for i, count := range counts { throughput[i] = count / s.duration.Seconds() @@ -360,7 +347,7 @@ func runMultiTenantFairness( } ok, maxLatencyDelta := floatsWithinPercentage(meanLatencies, failThreshold) - t.L().Printf("max-latency-delta=d%% mean-latency-per-tenant=%v\n", int(maxLatencyDelta*100), meanLatencies) + t.L().Printf("max-latency-delta=%d% mean-latency-per-tenant=%v\n", int(maxLatencyDelta*100), meanLatencies) if !ok { // TODO(irfansharif): Same as above -- this is a weak assertion. t.L().Printf("latency not within expectations: %f > %f %v", maxLatencyDelta, failThreshold, meanLatencies) diff --git a/pkg/cmd/roachtest/tests/multitenant_utils.go b/pkg/cmd/roachtest/tests/multitenant_utils.go index c1314a4654c1..39ac17bf1ba5 100644 --- a/pkg/cmd/roachtest/tests/multitenant_utils.go +++ b/pkg/cmd/roachtest/tests/multitenant_utils.go @@ -174,13 +174,6 @@ func (tn *tenantNode) storeDir() string { return fmt.Sprintf("cockroach-data-mt-%d-%d", tn.tenantID, tn.instanceID) } -// In secure mode the url we get from roachprod contains ssl parameters with -// local file paths. secureURL returns a url with those changed to -// roachprod/workload friendly local paths, ie "certs". -func (tn *tenantNode) secureURL() string { - return tn.relativeSecureURL -} - func (tn *tenantNode) start(ctx context.Context, t test.Test, c cluster.Cluster, binary string) { require.True(t, c.IsSecure())