From ebe09e76f98a0613758f9f65a0e3b68343484d55 Mon Sep 17 00:00:00 2001 From: Evan Wall Date: Wed, 17 Aug 2022 18:41:12 -0400 Subject: [PATCH 1/5] ttl: replace SPLIT AT with SplitTable in tests refs https://github.com/cockroachdb/cockroach/issues/85800 SPLIT AT was causing occasional test failures if it did not asynchronously split the range before the TTL job started. SplitTable synchronously splits the range before the test starts the TTL job. Release justification: Fix test flake. Release note: None --- pkg/sql/exec_util.go | 6 +- pkg/sql/ttl/ttljob/ttljob.go | 11 +- pkg/sql/ttl/ttljob/ttljob_processor.go | 5 +- pkg/sql/ttl/ttljob/ttljob_test.go | 237 ++++++++++++++++--------- 4 files changed, 171 insertions(+), 88 deletions(-) diff --git a/pkg/sql/exec_util.go b/pkg/sql/exec_util.go index 6874898e7b6b..07326727d15b 100644 --- a/pkg/sql/exec_util.go +++ b/pkg/sql/exec_util.go @@ -1566,9 +1566,9 @@ type TTLTestingKnobs struct { // AOSTDuration changes the AOST timestamp duration to add to the // current time. AOSTDuration *time.Duration - // RequireMultipleSpanPartitions is a flag to verify that the DistSQL will - // distribute the work across multiple nodes. - RequireMultipleSpanPartitions bool + // ExpectedNumSpanPartitions causes the TTL job to fail if it does not match + // the number of DistSQL processors. + ExpectedNumSpanPartitions int // ReturnStatsError causes stats errors to be returned instead of logged as // warnings. ReturnStatsError bool diff --git a/pkg/sql/ttl/ttljob/ttljob.go b/pkg/sql/ttl/ttljob/ttljob.go index 1520d78d742f..77d9421e4a55 100644 --- a/pkg/sql/ttl/ttljob/ttljob.go +++ b/pkg/sql/ttl/ttljob/ttljob.go @@ -214,8 +214,15 @@ func (t rowLevelTTLResumer) Resume(ctx context.Context, execCtx interface{}) err if err != nil { return err } - if knobs.RequireMultipleSpanPartitions && len(spanPartitions) == 0 { - return errors.New("multiple span partitions required") + expectedNumSpanPartitions := knobs.ExpectedNumSpanPartitions + if expectedNumSpanPartitions != 0 { + actualNumSpanPartitions := len(spanPartitions) + if expectedNumSpanPartitions != actualNumSpanPartitions { + return errors.AssertionFailedf( + "incorrect number of span partitions expected=%d actual=%d", + expectedNumSpanPartitions, actualNumSpanPartitions, + ) + } } sqlInstanceIDToTTLSpec := make(map[base.SQLInstanceID]*execinfrapb.TTLSpec) diff --git a/pkg/sql/ttl/ttljob/ttljob_processor.go b/pkg/sql/ttl/ttljob/ttljob_processor.go index 00d51673ab71..05f749f70890 100644 --- a/pkg/sql/ttl/ttljob/ttljob_processor.go +++ b/pkg/sql/ttl/ttljob/ttljob_processor.go @@ -233,8 +233,9 @@ func (ttl *ttlProcessor) work(ctx context.Context) error { true, /* useReadLock */ func(_ *kv.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater) error { progress := md.Progress - existingRowCount := progress.Details.(*jobspb.Progress_RowLevelTTL).RowLevelTTL.RowCount - progress.Details.(*jobspb.Progress_RowLevelTTL).RowLevelTTL.RowCount += processorRowCount + rowLevelTTL := progress.Details.(*jobspb.Progress_RowLevelTTL).RowLevelTTL + existingRowCount := rowLevelTTL.RowCount + rowLevelTTL.RowCount += processorRowCount ju.UpdateProgress(progress) log.VInfof( ctx, diff --git a/pkg/sql/ttl/ttljob/ttljob_test.go b/pkg/sql/ttl/ttljob/ttljob_test.go index 8b29ad0b3928..e2608ecb5ca3 100644 --- a/pkg/sql/ttl/ttljob/ttljob_test.go +++ b/pkg/sql/ttl/ttljob/ttljob_test.go @@ -15,6 +15,7 @@ import ( "context" "fmt" "regexp" + "strconv" "strings" "testing" "time" @@ -46,6 +47,8 @@ import ( "github.com/stretchr/testify/require" ) +var zeroDuration 
time.Duration + type ttlServer interface { JobRegistry() interface{} } @@ -54,6 +57,7 @@ type rowLevelTTLTestJobTestHelper struct { server ttlServer env *jobstest.JobSchedulerTestEnv cfg *scheduledjobs.JobExecutionConfig + tc serverutils.TestClusterInterface sqlDB *sqlutils.SQLRunner kvDB *kv.DB executeSchedules func() error @@ -86,15 +90,21 @@ func newRowLevelTTLTestJobTestHelper( TTL: testingKnobs, } - // As `ALTER TABLE ... SPLIT AT ...` is not supported in multi-tenancy, we - // do not run those tests. + replicationMode := base.ReplicationAuto + if numNodes > 1 { + replicationMode = base.ReplicationManual + } tc := serverutils.StartNewTestCluster(t, numNodes, base.TestClusterArgs{ + ReplicationMode: replicationMode, ServerArgs: base.TestServerArgs{ Knobs: baseTestingKnobs, DisableWebSessionAuthentication: true, }, }) + th.tc = tc ts := tc.Server(0) + // As `ALTER TABLE ... SPLIT AT ...` is not supported in multi-tenancy, we + // do not run those tests. if testMultiTenant { tenantServer, db := serverutils.StartTenant( t, ts, base.TestTenantArgs{ @@ -127,6 +137,9 @@ func newRowLevelTTLTestJobTestHelper( func (h *rowLevelTTLTestJobTestHelper) waitForScheduledJob( t *testing.T, expectedStatus jobs.Status, expectedErrorRe string, ) { + h.env.SetTime(timeutil.Now().Add(time.Hour * 24)) + require.NoError(t, h.executeSchedules()) + query := fmt.Sprintf( `SELECT status, error FROM [SHOW JOBS] WHERE job_id IN ( @@ -162,8 +175,48 @@ func (h *rowLevelTTLTestJobTestHelper) waitForScheduledJob( }) } -func (h *rowLevelTTLTestJobTestHelper) waitForSuccessfulScheduledJob(t *testing.T) { - h.waitForScheduledJob(t, jobs.StatusSucceeded, "") +func (h *rowLevelTTLTestJobTestHelper) verifyNonExpiredRows( + t *testing.T, tableName string, expirationExpression string, expectedNumNonExpiredRows int, +) { + // Check we have the number of expected rows. + var actualNumNonExpiredRows int + h.sqlDB.QueryRow( + t, + fmt.Sprintf(`SELECT count(1) FROM %s`, tableName), + ).Scan(&actualNumNonExpiredRows) + require.Equal(t, expectedNumNonExpiredRows, actualNumNonExpiredRows) + + // Also check all the rows expire way into the future. + h.sqlDB.QueryRow( + t, + fmt.Sprintf(`SELECT count(1) FROM %s WHERE %s >= now()`, tableName, expirationExpression), + ).Scan(&actualNumNonExpiredRows) + require.Equal(t, expectedNumNonExpiredRows, actualNumNonExpiredRows) +} + +func (h *rowLevelTTLTestJobTestHelper) verifyExpiredRows(t *testing.T, expectedNumExpiredRows int) { + rows := h.sqlDB.Query(t, ` + SELECT sys_j.status, sys_j.progress + FROM crdb_internal.jobs AS crdb_j + JOIN system.jobs as sys_j ON crdb_j.job_id = sys_j.id + WHERE crdb_j.job_type = 'ROW LEVEL TTL' + `) + jobCount := 0 + for rows.Next() { + var status string + var progressBytes []byte + require.NoError(t, rows.Scan(&status, &progressBytes)) + + require.Equal(t, "succeeded", status) + + var progress jobspb.Progress + require.NoError(t, protoutil.Unmarshal(progressBytes, &progress)) + + actualNumExpiredRows := progress.UnwrapDetails().(jobspb.RowLevelTTLProgress).RowCount + require.Equal(t, int64(expectedNumExpiredRows), actualNumExpiredRows) + jobCount++ + } + require.Equal(t, 1, jobCount) } func TestRowLevelTTLNoTestingKnobs(t *testing.T) { @@ -182,9 +235,6 @@ func TestRowLevelTTLNoTestingKnobs(t *testing.T) { th.sqlDB.Exec(t, `INSERT INTO t (id, crdb_internal_expiration) VALUES (1, now() - '1 month')`) // Force the schedule to execute. 
- th.env.SetTime(timeutil.Now().Add(time.Hour * 24)) - require.NoError(t, th.executeSchedules()) - th.waitForScheduledJob(t, jobs.StatusFailed, `found a recent schema change on the table`) } @@ -215,7 +265,7 @@ INSERT INTO t (id, crdb_internal_expiration) VALUES (1, now() - '1 month'), (2, { desc: "schema change during job", expectedTTLError: "error during row deletion: table has had a schema change since the job has started at .*, aborting", - aostDuration: time.Duration(0), + aostDuration: zeroDuration, // We cannot use a schema change to change the version in this test as // we overtook the job adoption method, which means schema changes get // blocked and may not run. @@ -244,9 +294,6 @@ INSERT INTO t (id, crdb_internal_expiration) VALUES (1, now() - '1 month'), (2, th.sqlDB.Exec(t, createTable) // Force the schedule to execute. - th.env.SetTime(timeutil.Now().Add(time.Hour * 24)) - require.NoError(t, th.executeSchedules()) - th.waitForScheduledJob(t, jobs.StatusFailed, tc.expectedTTLError) }) } @@ -286,7 +333,6 @@ INSERT INTO t (id, crdb_internal_expiration) VALUES (1, now() - '1 month'), (2, for _, tc := range testCases { t.Run(tc.desc, func(t *testing.T) { - var zeroDuration time.Duration th, cleanupFunc := newRowLevelTTLTestJobTestHelper( t, &sql.TTLTestingKnobs{ @@ -299,10 +345,8 @@ INSERT INTO t (id, crdb_internal_expiration) VALUES (1, now() - '1 month'), (2, th.sqlDB.ExecMultiple(t, strings.Split(tc.setup, ";")...) // Force the schedule to execute. - th.env.SetTime(timeutil.Now().Add(time.Hour * 24)) - require.NoError(t, th.executeSchedules()) - th.waitForScheduledJob(t, jobs.StatusFailed, tc.expectedTTLError) + var numRows int th.sqlDB.QueryRow(t, `SELECT count(1) FROM t`).Scan(&numRows) require.Equal(t, 2, numRows) @@ -310,6 +354,92 @@ INSERT INTO t (id, crdb_internal_expiration) VALUES (1, now() - '1 month'), (2, } } +func TestRowLevelTTLJobMultipleNodes(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + + numNodes := 5 + th, cleanupFunc := newRowLevelTTLTestJobTestHelper( + t, + &sql.TTLTestingKnobs{ + AOSTDuration: &zeroDuration, + ReturnStatsError: true, + ExpectedNumSpanPartitions: 2, + }, + false, /* testMultiTenant */ // SPLIT AT does not work with multi-tenant + numNodes, /* numNodes */ + ) + defer cleanupFunc() + + sqlDB := th.sqlDB + + // create table + tableName := "tbl" + expirationExpr := "expire_at" + sqlDB.Exec(t, fmt.Sprintf( + `CREATE TABLE %s ( + id INT PRIMARY KEY, + expire_at TIMESTAMP + ) WITH (ttl_expiration_expression = '%s')`, + tableName, expirationExpr, + )) + + // split table + const splitAt = 10_000 + ranges := sqlDB.QueryStr(t, fmt.Sprintf( + `SHOW RANGES FROM TABLE %s`, + tableName, + )) + require.Equal(t, 1, len(ranges)) + leaseHolderIdx, err := strconv.Atoi(ranges[0][4]) + require.NoError(t, err) + tableDesc := desctestutils.TestingGetPublicTableDescriptor( + th.kvDB, + keys.SystemSQLCodec, + "defaultdb", /* database */ + tableName, + ) + newLeaseHolderIdx := leaseHolderIdx + 1 + if newLeaseHolderIdx == numNodes { + newLeaseHolderIdx = 0 + } + th.tc.SplitTable(t, tableDesc, []serverutils.SplitPoint{{ + TargetNodeIdx: newLeaseHolderIdx, + Vals: []interface{}{splitAt}, + }}) + newRanges := sqlDB.QueryStr(t, fmt.Sprintf( + `SHOW RANGES FROM TABLE %s`, + tableName, + )) + require.Equal(t, 2, len(newRanges)) + + // populate table - even pk is non-expired, odd pk is expired + expectedNumNonExpiredRows := 0 + expectedNumExpiredRows := 0 + ts := timeutil.Now() + nonExpiredTs := ts.Add(time.Hour * 24 * 30) + expiredTs := 
ts.Add(-time.Hour) + const rowsPerRange = 10 + const insertStatement = `INSERT INTO tbl VALUES ($1, $2)` + for _, offset := range []int{0, splitAt} { // insert into both ranges + for i := offset; i < offset+rowsPerRange; { + sqlDB.Exec(t, insertStatement, i, nonExpiredTs) + i++ + expectedNumNonExpiredRows++ + sqlDB.Exec(t, insertStatement, i, expiredTs) + i++ + expectedNumExpiredRows++ + } + } + + // Force the schedule to execute. + th.waitForScheduledJob(t, jobs.StatusSucceeded, "") + + th.verifyNonExpiredRows(t, tableName, expirationExpr, expectedNumNonExpiredRows) + + th.verifyExpiredRows(t, expectedNumExpiredRows) +} + // TestRowLevelTTLJobRandomEntries inserts random entries into a given table // and runs a TTL job on them. func TestRowLevelTTLJobRandomEntries(t *testing.T) { @@ -335,7 +465,6 @@ func TestRowLevelTTLJobRandomEntries(t *testing.T) { numNonExpiredRows int numSplits int forceNonMultiTenant bool - numNodes int expirationExpression string addRow func(th *rowLevelTTLTestJobTestHelper, createTableStmt *tree.CreateTable, ts time.Time) } @@ -350,17 +479,6 @@ func TestRowLevelTTLJobRandomEntries(t *testing.T) { numExpiredRows: 1001, numNonExpiredRows: 5, }, - { - desc: "one column pk multiple nodes", - createTable: `CREATE TABLE tbl ( - id UUID PRIMARY KEY DEFAULT gen_random_uuid(), - text TEXT -) WITH (ttl_expire_after = '30 days')`, - numExpiredRows: 1001, - numNonExpiredRows: 5, - numNodes: 5, - numSplits: 10, - }, { desc: "one column pk, table ranges overlap", createTable: `CREATE TABLE tbl ( @@ -470,7 +588,7 @@ func TestRowLevelTTLJobRandomEntries(t *testing.T) { numExpiredRows: 1001, numNonExpiredRows: 5, expirationExpression: "expire_at", - addRow: func(th *rowLevelTTLTestJobTestHelper, createTableStmt *tree.CreateTable, ts time.Time) { + addRow: func(th *rowLevelTTLTestJobTestHelper, _ *tree.CreateTable, ts time.Time) { th.sqlDB.Exec( t, "INSERT INTO tbl (expire_at) VALUES ($1)", @@ -544,21 +662,14 @@ func TestRowLevelTTLJobRandomEntries(t *testing.T) { t.Run(tc.desc, func(t *testing.T) { // Log to make it slightly easier to reproduce a random config. t.Logf("test case: %#v", tc) - - var zeroDuration time.Duration - numNodes := tc.numNodes - if numNodes == 0 { - numNodes = 1 - } th, cleanupFunc := newRowLevelTTLTestJobTestHelper( t, &sql.TTLTestingKnobs{ - AOSTDuration: &zeroDuration, - ReturnStatsError: true, - RequireMultipleSpanPartitions: tc.numNodes > 0, // require if there is more than 1 node in tc + AOSTDuration: &zeroDuration, + ReturnStatsError: true, }, tc.numSplits == 0 && !tc.forceNonMultiTenant, // SPLIT AT does not work with multi-tenant - numNodes, /* numNodes */ + 1, /* numNodes */ ) defer cleanupFunc() @@ -619,6 +730,7 @@ func TestRowLevelTTLJobRandomEntries(t *testing.T) { } // Add expired and non-expired rows. + for i := 0; i < tc.numExpiredRows; i++ { addRow(th, createTableStmt, timeutil.Now().Add(-time.Hour)) } @@ -632,54 +744,17 @@ func TestRowLevelTTLJobRandomEntries(t *testing.T) { } // Force the schedule to execute. - th.env.SetTime(timeutil.Now().Add(time.Hour * 24)) - require.NoError(t, th.executeSchedules()) - - th.waitForSuccessfulScheduledJob(t) - - table := createTableStmt.Table.Table() + th.waitForScheduledJob(t, jobs.StatusSucceeded, "") - // Check we have the number of expected rows. - var numRows int - th.sqlDB.QueryRow( - t, - fmt.Sprintf(`SELECT count(1) FROM %s`, table), - ).Scan(&numRows) - require.Equal(t, tc.numNonExpiredRows, numRows) - - // Also check all the rows expire way into the future. 
+ tableName := createTableStmt.Table.Table() expirationExpression := "crdb_internal_expiration" if tc.expirationExpression != "" { expirationExpression = tc.expirationExpression } - th.sqlDB.QueryRow( - t, - fmt.Sprintf(`SELECT count(1) FROM %s WHERE %s >= now()`, table, expirationExpression), - ).Scan(&numRows) - require.Equal(t, tc.numNonExpiredRows, numRows) - - rows := th.sqlDB.Query(t, ` -SELECT sys_j.status, sys_j.progress -FROM crdb_internal.jobs AS crdb_j -JOIN system.jobs as sys_j ON crdb_j.job_id = sys_j.id -WHERE crdb_j.job_type = 'ROW LEVEL TTL' -`) - jobCount := 0 - for rows.Next() { - var status string - var progressBytes []byte - require.NoError(t, rows.Scan(&status, &progressBytes)) - - require.Equal(t, "succeeded", status) - - var progress jobspb.Progress - require.NoError(t, protoutil.Unmarshal(progressBytes, &progress)) - - rowLevelTTLProgress := progress.UnwrapDetails().(jobspb.RowLevelTTLProgress) - require.Equal(t, int64(tc.numExpiredRows), rowLevelTTLProgress.RowCount) - jobCount++ - } - require.Equal(t, 1, jobCount) + + th.verifyNonExpiredRows(t, tableName, expirationExpression, tc.numNonExpiredRows) + + th.verifyExpiredRows(t, tc.numExpiredRows) }) } } From 8b149a0105eb1830c11b70c65638b18e1e8aea41 Mon Sep 17 00:00:00 2001 From: Aditya Maru Date: Wed, 17 Aug 2022 17:18:45 -0400 Subject: [PATCH 2/5] externalconn: grant `ALL` on `CREATE EXTERNAL CONNECTION` Previously, the user that created the external connection was not granted any privileges as the creator/owner of the object. This diff changes this by granting the user `ALL` privileges. When synthetic privileges have a concept of owners, we should also set the owner of the External Connection object to the creator. This can happen once https://github.com/cockroachdb/cockroach/issues/86181 is addressed. There was also a small bug in the grant/revoke code where external connection names that required to be wrapped in `""` egs: `"foo-kms"` were being incorrectly stringified when creating the synthetic privilege path. This resulted in: ``` CREATE EXTERNAL CONNECTION "foo-kms" AS ... GRANT USAGE ON EXTERNAL CONNECTION "foo-kms" TO baz ``` To fail with a "foo-kms" cannot be found, even though it was infact created before attempting the grant. Tests have been added to test this case. Fixes: #86261 Release note (bug fix): Users that create an external connection are now granted `ALL` privileges on the object. 
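As a rough, self-contained sketch of the stringification difference behind the bug (this assumes the name is a `tree.Name`, the SQL identifier type in `pkg/sql/sem/tree`; it is an illustration, not the code touched by this patch): formatting the name as an identifier via `String()` re-adds the surrounding quotes when they are required, whereas a plain string conversion yields the raw name that the synthetic privilege path lookup expects.

```
package main

import (
	"fmt"

	"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
)

func main() {
	// "foo-kms" is not a valid bare identifier, so formatting it as an
	// identifier re-quotes it, which is what broke the privilege path lookup.
	var name tree.Name = "foo-kms"
	fmt.Println(name.String()) // "foo-kms" (quoted identifier form)
	fmt.Println(string(name))  // foo-kms (raw name)
}
```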
--- .../external-connections-privileges | 40 ++++-- pkg/ccl/cloudccl/externalconn/BUILD.bazel | 1 + .../cloudccl/externalconn/datadriven_test.go | 1 + .../testdata/create_drop_external_connection | 85 +----------- .../create_drop_external_connection | 85 +----------- .../privileges_external_connection | 126 ++++++++++++++++++ .../testdata/privileges_external_connection | 123 +++++++++++++++++ pkg/sql/create_external_connection.go | 18 ++- pkg/sql/grant_revoke_system.go | 4 +- .../logic_test/external_connection_privileges | 3 + .../testdata/logic_test/system_privileges | 20 ++- 11 files changed, 316 insertions(+), 190 deletions(-) create mode 100644 pkg/ccl/cloudccl/externalconn/testdata/multi-tenant/privileges_external_connection create mode 100644 pkg/ccl/cloudccl/externalconn/testdata/privileges_external_connection diff --git a/pkg/ccl/backupccl/testdata/backup-restore/external-connections-privileges b/pkg/ccl/backupccl/testdata/backup-restore/external-connections-privileges index 9ea95acdcb76..94219faa7210 100644 --- a/pkg/ccl/backupccl/testdata/backup-restore/external-connections-privileges +++ b/pkg/ccl/backupccl/testdata/backup-restore/external-connections-privileges @@ -12,7 +12,7 @@ CREATE EXTERNAL CONNECTION root AS 'nodelocal://1/root' ---- exec-sql user=testuser -CREATE EXTERNAL CONNECTION fails AS 'userfile:///noprivs' +CREATE EXTERNAL CONNECTION "testuser-ec" AS 'userfile:///noprivs' ---- pq: only users with the EXTERNALCONNECTION system privilege are allowed to CREATE EXTERNAL CONNECTION @@ -21,7 +21,7 @@ GRANT SYSTEM EXTERNALCONNECTION TO testuser; ---- exec-sql user=testuser -CREATE EXTERNAL CONNECTION fails AS 'nodelocal://1/privs' +CREATE EXTERNAL CONNECTION "testuser-ec" AS 'nodelocal://1/privs' ---- exec-sql @@ -32,17 +32,17 @@ exec-sql GRANT SELECT ON TABLE foo TO testuser ---- +# Since testuser created the External Connection they have `ALL` privileges on the object. exec-sql user=testuser -BACKUP TABLE foo INTO 'external://fails' +BACKUP TABLE foo INTO 'external://testuser-ec' ---- -pq: user testuser does not have USAGE privilege on external_connection fails exec-sql -GRANT USAGE ON EXTERNAL CONNECTION fails TO testuser; +GRANT USAGE ON EXTERNAL CONNECTION "testuser-ec" TO testuser; ---- exec-sql user=testuser -BACKUP TABLE foo INTO 'external://fails' +BACKUP TABLE foo INTO LATEST IN 'external://testuser-ec' ---- # Sanity check that the user can't write to any other external connection. @@ -51,18 +51,34 @@ BACKUP TABLE foo INTO 'external://root' ---- pq: user testuser does not have USAGE privilege on external_connection root -# Revoke the USAGE privilege to test that restore also requires it. +query-sql +SELECT * FROM system.privileges +---- +root /externalconn/root {ALL} {} +testuser /externalconn/testuser-ec {ALL} {} +testuser /global/ {EXTERNALCONNECTION} {} + +# Revoke the USAGE privilege. Note testuser had ALL privileges since they +# created the External Connection, but revoking USAGE means that they will now +# only have DROP privileges. Thus, they shouldn't be able to restore. 
exec-sql -REVOKE USAGE ON EXTERNAL CONNECTION fails FROM testuser; +REVOKE USAGE ON EXTERNAL CONNECTION "testuser-ec" FROM testuser; +---- + +query-sql +SELECT * FROM system.privileges ---- +root /externalconn/root {ALL} {} +testuser /externalconn/testuser-ec {DROP} {} +testuser /global/ {EXTERNALCONNECTION} {} exec-sql user=testuser -RESTORE TABLE foo FROM LATEST IN 'external://fails' +RESTORE TABLE foo FROM LATEST IN 'external://testuser-ec' ---- -pq: user testuser does not have USAGE privilege on external_connection fails +pq: user testuser does not have USAGE privilege on external_connection testuser-ec exec-sql -GRANT USAGE ON EXTERNAL CONNECTION fails TO testuser; +GRANT USAGE ON EXTERNAL CONNECTION "testuser-ec" TO testuser; ---- exec-sql @@ -71,7 +87,7 @@ GRANT CREATE ON DATABASE failsdb TO testuser; ---- exec-sql user=testuser -RESTORE TABLE foo FROM LATEST IN 'external://fails' WITH into_db=failsdb; +RESTORE TABLE foo FROM LATEST IN 'external://testuser-ec' WITH into_db=failsdb; ---- subtest end diff --git a/pkg/ccl/cloudccl/externalconn/BUILD.bazel b/pkg/ccl/cloudccl/externalconn/BUILD.bazel index d99abb585365..e57f5a4adc5f 100644 --- a/pkg/ccl/cloudccl/externalconn/BUILD.bazel +++ b/pkg/ccl/cloudccl/externalconn/BUILD.bazel @@ -10,6 +10,7 @@ go_test( data = glob(["testdata/**"]), deps = [ "//pkg/base", + "//pkg/ccl/backupccl", "//pkg/ccl/changefeedccl", "//pkg/ccl/kvccl/kvtenantccl", "//pkg/cloud/externalconn", diff --git a/pkg/ccl/cloudccl/externalconn/datadriven_test.go b/pkg/ccl/cloudccl/externalconn/datadriven_test.go index 810c08b6d323..e821154efb4b 100644 --- a/pkg/ccl/cloudccl/externalconn/datadriven_test.go +++ b/pkg/ccl/cloudccl/externalconn/datadriven_test.go @@ -15,6 +15,7 @@ import ( "testing" "github.com/cockroachdb/cockroach/pkg/base" + _ "github.com/cockroachdb/cockroach/pkg/ccl/backupccl" _ "github.com/cockroachdb/cockroach/pkg/ccl/changefeedccl" "github.com/cockroachdb/cockroach/pkg/cloud/externalconn" _ "github.com/cockroachdb/cockroach/pkg/cloud/externalconn/providers" // register all the concrete External Connection implementations diff --git a/pkg/ccl/cloudccl/externalconn/testdata/create_drop_external_connection b/pkg/ccl/cloudccl/externalconn/testdata/create_drop_external_connection index 8c581d3778c0..f85c40811d3e 100644 --- a/pkg/ccl/cloudccl/externalconn/testdata/create_drop_external_connection +++ b/pkg/ccl/cloudccl/externalconn/testdata/create_drop_external_connection @@ -23,7 +23,7 @@ pq: failed to construct External Connection details: failed to create nodelocal exec-sql CREATE EXTERNAL CONNECTION foo AS 'nodelocal://1/foo'; ---- -pq: external connection with connection name 'foo' already exists +pq: failed to create external connection: external connection with connection name 'foo' already exists # Create another External Connection with a unique name. 
exec-sql @@ -57,89 +57,6 @@ inspect-system-table subtest end -subtest create-external-connection-global-privilege - -exec-sql -CREATE USER testuser; ----- - -exec-sql user=testuser -CREATE EXTERNAL CONNECTION privileged AS 'nodelocal://1/foo' ----- -pq: only users with the EXTERNALCONNECTION system privilege are allowed to CREATE EXTERNAL CONNECTION - -exec-sql -GRANT SYSTEM EXTERNALCONNECTION TO testuser; ----- - -exec-sql user=testuser -CREATE EXTERNAL CONNECTION privileged AS 'nodelocal://1/foo' ----- - -inspect-system-table ----- -privileged STORAGE {"provider": "nodelocal", "simpleUri": {"uri": "nodelocal://1/foo"}} - -exec-sql -DROP EXTERNAL CONNECTION privileged; ----- - -exec-sql -REVOKE SYSTEM EXTERNALCONNECTION FROM testuser; ----- - -exec-sql user=testuser -CREATE EXTERNAL CONNECTION privileged AS 'nodelocal://1/foo' ----- -pq: only users with the EXTERNALCONNECTION system privilege are allowed to CREATE EXTERNAL CONNECTION - -subtest end - -subtest drop-external-storage-privilege - -exec-sql -CREATE EXTERNAL CONNECTION privileged AS 'nodelocal://1/foo' ----- - -# Create another External Connection. -exec-sql -CREATE EXTERNAL CONNECTION 'privileged-dup' AS 'nodelocal://1/foo' ----- - -exec-sql user=testuser -DROP EXTERNAL CONNECTION privileged ----- -pq: user testuser does not have DROP privilege on external_connection privileged - -inspect-system-table ----- -privileged STORAGE {"provider": "nodelocal", "simpleUri": {"uri": "nodelocal://1/foo"}} -privileged-dup STORAGE {"provider": "nodelocal", "simpleUri": {"uri": "nodelocal://1/foo"}} - -exec-sql -GRANT DROP ON EXTERNAL CONNECTION privileged TO testuser; ----- - -exec-sql user=testuser -DROP EXTERNAL CONNECTION privileged ----- - -# Try to drop the second external connection, testuser should be disallowed. -exec-sql user=testuser -DROP EXTERNAL CONNECTION 'privileged-dup' ----- -pq: user testuser does not have DROP privilege on external_connection privileged-dup - -inspect-system-table ----- -privileged-dup STORAGE {"provider": "nodelocal", "simpleUri": {"uri": "nodelocal://1/foo"}} - -exec-sql -DROP EXTERNAL CONNECTION 'privileged-dup' ----- - -subtest end - subtest basic-gcp-kms disable-check-kms diff --git a/pkg/ccl/cloudccl/externalconn/testdata/multi-tenant/create_drop_external_connection b/pkg/ccl/cloudccl/externalconn/testdata/multi-tenant/create_drop_external_connection index 7141e55466ab..497c83132a4e 100644 --- a/pkg/ccl/cloudccl/externalconn/testdata/multi-tenant/create_drop_external_connection +++ b/pkg/ccl/cloudccl/externalconn/testdata/multi-tenant/create_drop_external_connection @@ -26,7 +26,7 @@ pq: failed to construct External Connection details: failed to create nodelocal exec-sql CREATE EXTERNAL CONNECTION foo AS 'nodelocal://1/foo'; ---- -pq: external connection with connection name 'foo' already exists +pq: failed to create external connection: external connection with connection name 'foo' already exists # Create another External Connection with a unique name. 
exec-sql @@ -60,89 +60,6 @@ inspect-system-table subtest end -subtest create-external-connection-global-privilege - -exec-sql -CREATE USER testuser; ----- - -exec-sql user=testuser -CREATE EXTERNAL CONNECTION privileged AS 'nodelocal://1/foo' ----- -pq: only users with the EXTERNALCONNECTION system privilege are allowed to CREATE EXTERNAL CONNECTION - -exec-sql -GRANT SYSTEM EXTERNALCONNECTION TO testuser; ----- - -exec-sql user=testuser -CREATE EXTERNAL CONNECTION privileged AS 'nodelocal://1/foo' ----- - -inspect-system-table ----- -privileged STORAGE {"provider": "nodelocal", "simpleUri": {"uri": "nodelocal://1/foo"}} - -exec-sql -DROP EXTERNAL CONNECTION privileged; ----- - -exec-sql -REVOKE SYSTEM EXTERNALCONNECTION FROM testuser; ----- - -exec-sql user=testuser -CREATE EXTERNAL CONNECTION privileged AS 'nodelocal://1/foo' ----- -pq: only users with the EXTERNALCONNECTION system privilege are allowed to CREATE EXTERNAL CONNECTION - -subtest end - -subtest drop-external-storage-privilege - -exec-sql -CREATE EXTERNAL CONNECTION privileged AS 'nodelocal://1/foo' ----- - -# Create another External Connection. -exec-sql -CREATE EXTERNAL CONNECTION 'privileged-dup' AS 'nodelocal://1/foo' ----- - -exec-sql user=testuser -DROP EXTERNAL CONNECTION privileged ----- -pq: user testuser does not have DROP privilege on external_connection privileged - -inspect-system-table ----- -privileged STORAGE {"provider": "nodelocal", "simpleUri": {"uri": "nodelocal://1/foo"}} -privileged-dup STORAGE {"provider": "nodelocal", "simpleUri": {"uri": "nodelocal://1/foo"}} - -exec-sql -GRANT DROP ON EXTERNAL CONNECTION privileged TO testuser; ----- - -exec-sql user=testuser -DROP EXTERNAL CONNECTION privileged ----- - -# Try to drop the second external connection, testuser should be disallowed. 
-exec-sql user=testuser -DROP EXTERNAL CONNECTION 'privileged-dup' ----- -pq: user testuser does not have DROP privilege on external_connection privileged-dup - -inspect-system-table ----- -privileged-dup STORAGE {"provider": "nodelocal", "simpleUri": {"uri": "nodelocal://1/foo"}} - -exec-sql -DROP EXTERNAL CONNECTION 'privileged-dup' ----- - -subtest end - subtest basic-gs-kms disable-check-kms diff --git a/pkg/ccl/cloudccl/externalconn/testdata/multi-tenant/privileges_external_connection b/pkg/ccl/cloudccl/externalconn/testdata/multi-tenant/privileges_external_connection new file mode 100644 index 000000000000..5d30e8ddb252 --- /dev/null +++ b/pkg/ccl/cloudccl/externalconn/testdata/multi-tenant/privileges_external_connection @@ -0,0 +1,126 @@ +subtest create-external-connection-global-privilege + +initialize tenant=10 +---- + +exec-sql +CREATE USER testuser; +---- + +exec-sql user=testuser +CREATE EXTERNAL CONNECTION "global-privileged" AS 'nodelocal://1/foo' +---- +pq: only users with the EXTERNALCONNECTION system privilege are allowed to CREATE EXTERNAL CONNECTION + +exec-sql +GRANT SYSTEM EXTERNALCONNECTION TO testuser; +---- + +exec-sql user=testuser +CREATE EXTERNAL CONNECTION "global-privileged" AS 'nodelocal://1/foo' +---- + +inspect-system-table +---- +global-privileged STORAGE {"provider": "nodelocal", "simpleUri": {"uri": "nodelocal://1/foo"}} + +exec-sql +DROP EXTERNAL CONNECTION "global-privileged"; +---- + +exec-sql +REVOKE SYSTEM EXTERNALCONNECTION FROM testuser; +---- + +exec-sql user=testuser +CREATE EXTERNAL CONNECTION "global-privileged" AS 'nodelocal://1/foo' +---- +pq: only users with the EXTERNALCONNECTION system privilege are allowed to CREATE EXTERNAL CONNECTION + +subtest end + +subtest drop-external-storage-privilege + +exec-sql +CREATE EXTERNAL CONNECTION "drop-privileged" AS 'nodelocal://1/foo' +---- + +# Create another External Connection. +exec-sql +CREATE EXTERNAL CONNECTION 'drop-privileged-dup' AS 'nodelocal://1/foo' +---- + +exec-sql user=testuser +DROP EXTERNAL CONNECTION "drop-privileged" +---- +pq: user testuser does not have DROP privilege on external_connection drop-privileged + +inspect-system-table +---- +drop-privileged STORAGE {"provider": "nodelocal", "simpleUri": {"uri": "nodelocal://1/foo"}} +drop-privileged-dup STORAGE {"provider": "nodelocal", "simpleUri": {"uri": "nodelocal://1/foo"}} + +exec-sql +GRANT DROP ON EXTERNAL CONNECTION "drop-privileged" TO testuser; +---- + +exec-sql user=testuser +DROP EXTERNAL CONNECTION "drop-privileged" +---- + +# Try to drop the second external connection, testuser should be disallowed. +exec-sql user=testuser +DROP EXTERNAL CONNECTION 'drop-privileged-dup' +---- +pq: user testuser does not have DROP privilege on external_connection drop-privileged-dup + +inspect-system-table +---- +drop-privileged-dup STORAGE {"provider": "nodelocal", "simpleUri": {"uri": "nodelocal://1/foo"}} + +exec-sql +DROP EXTERNAL CONNECTION 'drop-privileged-dup' +---- + +subtest end + +subtest create-grants-all + +# Reset the user. +exec-sql +DROP USER testuser +---- + +exec-sql +CREATE USER testuser +---- + +exec-sql +GRANT SYSTEM EXTERNALCONNECTION TO testuser +---- + +# Create an EC as root, testuser cannot use this. 
+exec-sql +CREATE EXTERNAL CONNECTION root AS 'userfile:///foo' +---- + +exec-sql user=testuser +CREATE TABLE foo (id INT) +---- + +exec-sql user=testuser +BACKUP TABLE foo INTO 'external://foo' +---- +pq: user testuser does not have USAGE privilege on external_connection foo + +# Now create an EC as testuser, they should be able to use this EC since on +# creation they are given `ALL` privileges. +exec-sql user=testuser +CREATE EXTERNAL CONNECTION 'not-root' AS 'userfile:///bar' +---- + +exec-sql user=testuser +BACKUP TABLE foo INTO 'external://not-root' +---- + +subtest end diff --git a/pkg/ccl/cloudccl/externalconn/testdata/privileges_external_connection b/pkg/ccl/cloudccl/externalconn/testdata/privileges_external_connection new file mode 100644 index 000000000000..3b1c951ce2e6 --- /dev/null +++ b/pkg/ccl/cloudccl/externalconn/testdata/privileges_external_connection @@ -0,0 +1,123 @@ +subtest create-external-connection-global-privilege + +exec-sql +CREATE USER testuser; +---- + +exec-sql user=testuser +CREATE EXTERNAL CONNECTION "global-privileged" AS 'nodelocal://1/foo' +---- +pq: only users with the EXTERNALCONNECTION system privilege are allowed to CREATE EXTERNAL CONNECTION + +exec-sql +GRANT SYSTEM EXTERNALCONNECTION TO testuser; +---- + +exec-sql user=testuser +CREATE EXTERNAL CONNECTION "global-privileged" AS 'nodelocal://1/foo' +---- + +inspect-system-table +---- +global-privileged STORAGE {"provider": "nodelocal", "simpleUri": {"uri": "nodelocal://1/foo"}} + +exec-sql +DROP EXTERNAL CONNECTION "global-privileged"; +---- + +exec-sql +REVOKE SYSTEM EXTERNALCONNECTION FROM testuser; +---- + +exec-sql user=testuser +CREATE EXTERNAL CONNECTION "global-privileged" AS 'nodelocal://1/foo' +---- +pq: only users with the EXTERNALCONNECTION system privilege are allowed to CREATE EXTERNAL CONNECTION + +subtest end + +subtest drop-external-storage-privilege + +exec-sql +CREATE EXTERNAL CONNECTION "drop-privileged" AS 'nodelocal://1/foo' +---- + +# Create another External Connection. +exec-sql +CREATE EXTERNAL CONNECTION 'drop-privileged-dup' AS 'nodelocal://1/foo' +---- + +exec-sql user=testuser +DROP EXTERNAL CONNECTION "drop-privileged" +---- +pq: user testuser does not have DROP privilege on external_connection drop-privileged + +inspect-system-table +---- +drop-privileged STORAGE {"provider": "nodelocal", "simpleUri": {"uri": "nodelocal://1/foo"}} +drop-privileged-dup STORAGE {"provider": "nodelocal", "simpleUri": {"uri": "nodelocal://1/foo"}} + +exec-sql +GRANT DROP ON EXTERNAL CONNECTION "drop-privileged" TO testuser; +---- + +exec-sql user=testuser +DROP EXTERNAL CONNECTION "drop-privileged" +---- + +# Try to drop the second external connection, testuser should be disallowed. +exec-sql user=testuser +DROP EXTERNAL CONNECTION 'drop-privileged-dup' +---- +pq: user testuser does not have DROP privilege on external_connection drop-privileged-dup + +inspect-system-table +---- +drop-privileged-dup STORAGE {"provider": "nodelocal", "simpleUri": {"uri": "nodelocal://1/foo"}} + +exec-sql +DROP EXTERNAL CONNECTION 'drop-privileged-dup' +---- + +subtest end + +subtest create-grants-all + +# Reset the user. +exec-sql +DROP USER testuser +---- + +exec-sql +CREATE USER testuser +---- + +exec-sql +GRANT SYSTEM EXTERNALCONNECTION TO testuser +---- + +# Create an EC as root, testuser cannot use this. 
+exec-sql +CREATE EXTERNAL CONNECTION root AS 'userfile:///foo' +---- + +exec-sql user=testuser +CREATE TABLE foo (id INT) +---- + +exec-sql user=testuser +BACKUP TABLE foo INTO 'external://foo' +---- +pq: user testuser does not have USAGE privilege on external_connection foo + +# Now create an EC as testuser, they should be able to use this EC since on +# creation they are given `ALL` privileges. +exec-sql user=testuser +CREATE EXTERNAL CONNECTION 'not-root' AS 'userfile:///bar' +---- + +exec-sql user=testuser +BACKUP TABLE foo INTO 'external://not-root' +---- + +subtest end diff --git a/pkg/sql/create_external_connection.go b/pkg/sql/create_external_connection.go index 54c15797d741..958dc26e100d 100644 --- a/pkg/sql/create_external_connection.go +++ b/pkg/sql/create_external_connection.go @@ -12,13 +12,16 @@ package sql import ( "context" + "fmt" "github.com/cockroachdb/cockroach/pkg/cloud/externalconn" "github.com/cockroachdb/cockroach/pkg/clusterversion" + "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/privilege" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" + "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" "github.com/cockroachdb/cockroach/pkg/sql/syntheticprivilege" "github.com/cockroachdb/errors" ) @@ -110,7 +113,20 @@ func (p *planner) createExternalConnection( // Create the External Connection and persist it in the // `system.external_connections` table. - return ex.Create(params.ctx, params.ExecCfg().InternalExecutor, params.p.User(), p.Txn()) + if err := ex.Create(params.ctx, params.ExecCfg().InternalExecutor, params.p.User(), p.Txn()); err != nil { + return errors.Wrap(err, "failed to create external connection") + } + + // Grant user `ALL` on the newly created External Connection. + grantStatement := fmt.Sprintf(`GRANT ALL ON EXTERNAL CONNECTION "%s" TO %s`, + name, p.User().SQLIdentifier()) + _, err = params.ExecCfg().InternalExecutor.ExecEx(params.ctx, + "grant-on-create-external-connection", p.Txn(), + sessiondata.InternalExecutorOverride{User: username.NodeUserName()}, grantStatement) + if err != nil { + return errors.Wrap(err, "failed to grant on newly created External Connection") + } + return nil } func (c *createExternalConectionNode) Next(_ runParams) (bool, error) { return false, nil } diff --git a/pkg/sql/grant_revoke_system.go b/pkg/sql/grant_revoke_system.go index 7ab47b32af6a..61300bff85e1 100644 --- a/pkg/sql/grant_revoke_system.go +++ b/pkg/sql/grant_revoke_system.go @@ -224,13 +224,13 @@ func (n *changeNonDescriptorBackedPrivilegesNode) makeSystemPrivilegeObject( var ret []syntheticprivilege.Object for _, externalConnectionName := range n.targets.ExternalConnections { // Ensure that an External Connection of this name actually exists. 
- if _, err := externalconn.LoadExternalConnection(ctx, externalConnectionName.String(), + if _, err := externalconn.LoadExternalConnection(ctx, string(externalConnectionName), p.ExecCfg().InternalExecutor, p.Txn()); err != nil { return nil, errors.Wrap(err, "failed to resolve External Connection") } ret = append(ret, &syntheticprivilege.ExternalConnectionPrivilege{ - ConnectionName: externalConnectionName.String(), + ConnectionName: string(externalConnectionName), }) } return ret, nil diff --git a/pkg/sql/logictest/testdata/logic_test/external_connection_privileges b/pkg/sql/logictest/testdata/logic_test/external_connection_privileges index 6bf58aeb7d9b..2e6422deb357 100644 --- a/pkg/sql/logictest/testdata/logic_test/external_connection_privileges +++ b/pkg/sql/logictest/testdata/logic_test/external_connection_privileges @@ -24,6 +24,7 @@ GRANT USAGE,DROP ON EXTERNAL CONNECTION foo TO testuser query TTTT SELECT * FROM system.privileges ---- +root /externalconn/foo {ALL} {} testuser /externalconn/foo {DROP,USAGE} {} statement ok @@ -32,6 +33,7 @@ REVOKE USAGE,DROP ON EXTERNAL CONNECTION foo FROM testuser query TTTT SELECT * FROM system.privileges ---- +root /externalconn/foo {ALL} {} statement ok GRANT USAGE,DROP ON EXTERNAL CONNECTION foo TO testuser @@ -62,6 +64,7 @@ query TTTT SELECT * FROM system.privileges ORDER BY username ---- bar /externalconn/foo {DROP,USAGE} {} +root /externalconn/foo {ALL} {} testuser /externalconn/foo {DROP,USAGE} {DROP,USAGE} # Invalid grants on external connections. diff --git a/pkg/sql/logictest/testdata/logic_test/system_privileges b/pkg/sql/logictest/testdata/logic_test/system_privileges index 69a47a0f5a63..a89fcea738bd 100644 --- a/pkg/sql/logictest/testdata/logic_test/system_privileges +++ b/pkg/sql/logictest/testdata/logic_test/system_privileges @@ -39,7 +39,8 @@ user root query TTTT SELECT * FROM system.privileges ORDER BY 1, 2 ---- -testuser /global/ {EXTERNALCONNECTION,MODIFYCLUSTERSETTING} {} +testuser /externalconn/foo {ALL} {} +testuser /global/ {EXTERNALCONNECTION,MODIFYCLUSTERSETTING} {} query TT SELECT connection_name, connection_type FROM system.external_connections @@ -52,6 +53,11 @@ REVOKE SYSTEM MODIFYCLUSTERSETTING FROM testuser statement ok REVOKE SYSTEM EXTERNALCONNECTION FROM testuser +# testuser is granted ALL privileges on the External Connection they create, +# revoke that. 
+statement ok +REVOKE ALL ON EXTERNAL CONNECTION foo FROM testuser + user testuser statement error pq: only users with either MODIFYCLUSTERSETTING or VIEWCLUSTERSETTING system privileges are allowed to read crdb_internal.cluster_settings @@ -119,7 +125,7 @@ REVOKE SYSTEM MODIFYCLUSTERSETTING FROM testuser query TTTT SELECT * FROM system.privileges ORDER BY 1, 2 ---- -root /global/ {MODIFYCLUSTERSETTING} {} +root /global/ {MODIFYCLUSTERSETTING} {} # test VIEWCLUSTERSETTING user testuser @@ -158,7 +164,7 @@ user root query TTTT SELECT * FROM system.privileges ---- -root /global/ {MODIFYCLUSTERSETTING} {} +root /global/ {MODIFYCLUSTERSETTING} {} # test VIEWACTIVITY user testuser @@ -197,7 +203,7 @@ user root query TTTT SELECT * FROM system.privileges ---- -root /global/ {MODIFYCLUSTERSETTING} {} +root /global/ {MODIFYCLUSTERSETTING} {} # test VIEWACTIVITYREDACTED user testuser @@ -220,8 +226,8 @@ user root query TTTT SELECT * FROM system.privileges ORDER BY 1, 2 ---- -root /global/ {MODIFYCLUSTERSETTING} {} -testuser /global/ {VIEWACTIVITYREDACTED} {} +root /global/ {MODIFYCLUSTERSETTING} {} +testuser /global/ {VIEWACTIVITYREDACTED} {} statement ok REVOKE SYSTEM VIEWACTIVITYREDACTED FROM testuser @@ -236,4 +242,4 @@ user root query TTTT SELECT * FROM system.privileges ---- -root /global/ {MODIFYCLUSTERSETTING} {} +root /global/ {MODIFYCLUSTERSETTING} {} From 3f308d148968ec425cb45ac0012f16b25617af65 Mon Sep 17 00:00:00 2001 From: Eric Harmeling Date: Thu, 18 Aug 2022 12:27:52 -0400 Subject: [PATCH 3/5] cluster-ui: added internal prefix to transaction workload insights view Previously, the connected component for transaction workload insights was using the selector for the sessions data slice to load the internal app name prefix. This isn't necessary; we can just use a constant, as is done on other pages. Fixes #86250. 
Release justification: bug fixes and low-risk updates to new functionality Release note: None --- .../transactionInsights/transactionInsights.fixture.ts | 1 - .../transactionInsights/transactionInsightsView.tsx | 7 +++---- .../src/views/insights/workloadInsightsPageConnected.tsx | 2 -- 3 files changed, 3 insertions(+), 7 deletions(-) diff --git a/pkg/ui/workspaces/cluster-ui/src/insights/workloadInsights/transactionInsights/transactionInsights.fixture.ts b/pkg/ui/workspaces/cluster-ui/src/insights/workloadInsights/transactionInsights/transactionInsights.fixture.ts index 3cb941b16c95..1c039445db0a 100644 --- a/pkg/ui/workspaces/cluster-ui/src/insights/workloadInsights/transactionInsights/transactionInsights.fixture.ts +++ b/pkg/ui/workspaces/cluster-ui/src/insights/workloadInsights/transactionInsights/transactionInsights.fixture.ts @@ -57,7 +57,6 @@ export const transactionInsightsPropsFixture: TransactionInsightsViewProps = { filters: { app: "", }, - internalAppNamePrefix: "$ internal", refreshTransactionInsights: () => {}, onSortChange: () => {}, onFiltersChange: () => {}, diff --git a/pkg/ui/workspaces/cluster-ui/src/insights/workloadInsights/transactionInsights/transactionInsightsView.tsx b/pkg/ui/workspaces/cluster-ui/src/insights/workloadInsights/transactionInsights/transactionInsightsView.tsx index ea544268defe..976bb97147cd 100644 --- a/pkg/ui/workspaces/cluster-ui/src/insights/workloadInsights/transactionInsights/transactionInsightsView.tsx +++ b/pkg/ui/workspaces/cluster-ui/src/insights/workloadInsights/transactionInsights/transactionInsightsView.tsx @@ -56,7 +56,6 @@ export type TransactionInsightsViewStateProps = { transactionsError: Error | null; filters: InsightEventFilters; sortSetting: SortSetting; - internalAppNamePrefix: string; }; export type TransactionInsightsViewDispatchProps = { @@ -69,6 +68,7 @@ export type TransactionInsightsViewProps = TransactionInsightsViewStateProps & TransactionInsightsViewDispatchProps; const INSIGHT_TXN_SEARCH_PARAM = "q"; +const INTERNAL_APP_NAME_PREFIX = "$ internal"; export const TransactionInsightsView: React.FC< TransactionInsightsViewProps @@ -77,7 +77,6 @@ export const TransactionInsightsView: React.FC< transactions, transactionsError, filters, - internalAppNamePrefix, refreshTransactionInsights, onFiltersChange, onSortChange, @@ -178,13 +177,13 @@ export const TransactionInsightsView: React.FC< const apps = getAppsFromTransactionInsights( transactionInsights, - internalAppNamePrefix, + INTERNAL_APP_NAME_PREFIX, ); const countActiveFilters = calculateActiveFilters(filters); const filteredTransactions = filterTransactionInsights( transactionInsights, filters, - internalAppNamePrefix, + INTERNAL_APP_NAME_PREFIX, search, ); diff --git a/pkg/ui/workspaces/db-console/src/views/insights/workloadInsightsPageConnected.tsx b/pkg/ui/workspaces/db-console/src/views/insights/workloadInsightsPageConnected.tsx index 3a9108346eb5..885a4280a50e 100644 --- a/pkg/ui/workspaces/db-console/src/views/insights/workloadInsightsPageConnected.tsx +++ b/pkg/ui/workspaces/db-console/src/views/insights/workloadInsightsPageConnected.tsx @@ -19,7 +19,6 @@ import { TransactionInsightsViewDispatchProps, TransactionInsightsView, } from "@cockroachlabs/cluster-ui"; -import { selectAppName } from "src/selectors/activeExecutionsSelectors"; import { filtersLocalSetting, sortSettingLocalSetting, @@ -34,7 +33,6 @@ const mapStateToProps = ( transactionsError: state.cachedData?.insights.lastError, filters: filtersLocalSetting.selector(state), sortSetting: 
sortSettingLocalSetting.selector(state), - internalAppNamePrefix: selectAppName(state), }); const mapDispatchToProps = { From 73f41a6775dd93fa4e6f16a8531f7fe9416b0b4a Mon Sep 17 00:00:00 2001 From: DrewKimball Date: Mon, 15 Aug 2022 17:14:17 -0700 Subject: [PATCH 4/5] opt: use only required columns in provided ordering for project Project operators can only pass through their input ordering. However, the provided ordering may have to be remapped in order to ensure it only refers to output columns, since the `Project` can add and remove columns. The `Project` uses its `internalFDs` field to accomplish the remapping; these are constructed when the `Project` is added to the memo by combining the functional dependencies of the input and the projections. The problem occurs when transformation rules cause the input of the `Project` to "reveal" additional functional dependencies. For example, one side of a union may be eliminated and the FDs of the remaining side used in the result. This can cause the `Project` to output an ordering that is equivalent to the required ordering according to its own FDs, but which a parent operator cannot tell is equivalent because its FDs were calculated before the tranformation rule fired. This can cause panics later down the line when the provided ordering does not match up with the required ordering. In the following example, an exploration rule transforms the join into two joins unioned together, one over each disjunct. After the transformation, a normalization rule fires that removes the `t0.rowid IS NULL` side because rowids are non-null. This reveals the `t1.rowid = t0.rowid` FD, which later causes `t0.rowid` to be used in a provided ordering rather than `t1.rowid`. For the reasons mentioned above, this later causes a panic when a `Project` attempts to remap to the required `t1.rowid` ordering. ``` CREATE TABLE t0 (c0 INT); CREATE TABLE t1 (c0 INT); SELECT * FROM t0 CROSS JOIN t1 WHERE (t0.rowid IS NULL) OR (t1.rowid IN (t0.rowid)) ORDER BY t1.rowid; ``` This commit prevents the panic by making `Project` operators remap the input provided ordering to use columns from the required ordering (which are a subset of the output columns). This prevents the disparity between required and provided orderings that can cause panics down the line. In the example given above, the `t1.rowid` column would be chosen for the provided ordering because it is in the required ordering. Fixes #85393 Release note (bug fix): fixed a vulnerability in the optimizer that could cause a panic in rare cases when planning complex queries with `ORDER BY`. 
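To make the remapping idea concrete, here is a small, self-contained toy (hypothetical helper name and plain integer column IDs; the real implementation works on `props.OrderingChoice` and the projection's functional dependencies): each provided-ordering column is replaced by an equivalent column drawn from the set that the required ordering is allowed to mention.

```
package main

import "fmt"

// remapToAllowed rewrites a provided ordering so it only mentions columns in
// the allowed set, substituting an equivalent column where necessary. This
// mirrors the fix: the Project's provided ordering is built from the required
// ordering's columns rather than from all output columns.
func remapToAllowed(provided []int, equiv map[int][]int, allowed map[int]bool) ([]int, error) {
	out := make([]int, 0, len(provided))
	for _, col := range provided {
		if allowed[col] {
			out = append(out, col)
			continue
		}
		found := false
		for _, eq := range equiv[col] {
			if allowed[eq] {
				out = append(out, eq)
				found = true
				break
			}
		}
		if !found {
			return nil, fmt.Errorf("no equivalent of column %d in the allowed set", col)
		}
	}
	return out, nil
}

func main() {
	// t0.rowid (col 2) and t1.rowid (col 6) are equivalent; the required
	// ordering mentions only t1.rowid, so +2 is remapped to +6.
	fmt.Println(remapToAllowed([]int{2}, map[int][]int{2: {6}, 6: {2}}, map[int]bool{6: true}))
}
```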
Release justification: fixes a release-blocker --- pkg/sql/opt/ordering/project.go | 16 +++++- pkg/sql/opt/xform/testdata/physprops/ordering | 54 +++++++++++++++++++ 2 files changed, 68 insertions(+), 2 deletions(-) diff --git a/pkg/sql/opt/ordering/project.go b/pkg/sql/opt/ordering/project.go index 62410404af24..b824cfba05ce 100644 --- a/pkg/sql/opt/ordering/project.go +++ b/pkg/sql/opt/ordering/project.go @@ -14,6 +14,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/opt" "github.com/cockroachdb/cockroach/pkg/sql/opt/memo" "github.com/cockroachdb/cockroach/pkg/sql/opt/props" + "github.com/cockroachdb/errors" ) func projectCanProvideOrdering(expr memo.RelExpr, required *props.OrderingChoice) bool { @@ -74,13 +75,24 @@ func projectOrderingToInput( } func projectBuildProvided(expr memo.RelExpr, required *props.OrderingChoice) opt.Ordering { - p := expr.(*memo.ProjectExpr) + // Ensure that the child provided ordering only refers to columns from the + // required ordering choice. This is necessary because there may be cases + // where the input of the Project has undergone transformations that allow it + // to "see" more functional dependencies than the original memo group. This + // can cause the child to provide an ordering that is equivalent to the + // required ordering, but which the parent Project cannot prove is equivalent + // because its FDs have less information. This can lead to a panic later on. + ordCols := required.ColSet() + if !ordCols.SubsetOf(expr.Relational().OutputCols) { + panic(errors.AssertionFailedf("expected required columns to be a subset of output columns")) + } // Project can only satisfy required orderings that refer to projected // columns; it should always be possible to remap the columns in the input's // provided ordering. + p := expr.(*memo.ProjectExpr) return remapProvided( p.Input.ProvidedPhysical().Ordering, p.InternalFDs(), - p.Relational().OutputCols, + ordCols, ) } diff --git a/pkg/sql/opt/xform/testdata/physprops/ordering b/pkg/sql/opt/xform/testdata/physprops/ordering index 460b8e05cb2d..61db209f459a 100644 --- a/pkg/sql/opt/xform/testdata/physprops/ordering +++ b/pkg/sql/opt/xform/testdata/physprops/ordering @@ -2845,3 +2845,57 @@ sort (segmented) │ ├── key: (1-3) │ └── ordering: +1,+2,+3 └── filters (true) + +# Regression test for #85393 - use only columns from the required ordering when +# building the provided ordering for Project operators. 
+exec-ddl +CREATE TABLE t0_85393 (c0 INT); +---- + +exec-ddl +CREATE TABLE t1_85393 (c0 INT); +---- + +opt +SELECT * +FROM t0_85393 CROSS JOIN t1_85393 +WHERE (t0_85393.rowid IS NULL) OR (t1_85393.rowid IN (t0_85393.rowid)) +ORDER BY t1_85393.rowid; +---- +project + ├── columns: c0:1 c0:5 [hidden: t1_85393.rowid:6!null] + ├── fd: (6)-->(5) + ├── ordering: +6 + └── project + ├── columns: t0_85393.c0:1 t0_85393.rowid:2!null t1_85393.c0:5 t1_85393.rowid:6!null + ├── key: (2,6) + ├── fd: (2)-->(1), (6)-->(5) + ├── ordering: +6 + └── project + ├── columns: t0_85393.c0:1 t0_85393.rowid:2!null t1_85393.c0:5 t1_85393.rowid:6!null + ├── key: (6) + ├── fd: (2)==(6), (6)==(2), (2)-->(1,5) + ├── ordering: +(2|6) [actual: +2] + ├── inner-join (merge) + │ ├── columns: t0_85393.c0:9 t0_85393.rowid:10!null t1_85393.c0:13 t1_85393.rowid:14!null + │ ├── left ordering: +10 + │ ├── right ordering: +14 + │ ├── key: (14) + │ ├── fd: (10)-->(9), (14)-->(13), (10)==(14), (14)==(10) + │ ├── ordering: +(10|14) [actual: +10] + │ ├── scan t0_85393 + │ │ ├── columns: t0_85393.c0:9 t0_85393.rowid:10!null + │ │ ├── key: (10) + │ │ ├── fd: (10)-->(9) + │ │ └── ordering: +10 + │ ├── scan t1_85393 + │ │ ├── columns: t1_85393.c0:13 t1_85393.rowid:14!null + │ │ ├── key: (14) + │ │ ├── fd: (14)-->(13) + │ │ └── ordering: +14 + │ └── filters (true) + └── projections + ├── t0_85393.c0:9 [as=t0_85393.c0:1, outer=(9)] + ├── t0_85393.rowid:10 [as=t0_85393.rowid:2, outer=(10)] + ├── t1_85393.c0:13 [as=t1_85393.c0:5, outer=(13)] + └── t1_85393.rowid:14 [as=t1_85393.rowid:6, outer=(14)] From f964648e1d0ced3db8cb4e3112cf37a5c53459c0 Mon Sep 17 00:00:00 2001 From: j82w Date: Fri, 12 Aug 2022 15:52:11 -0400 Subject: [PATCH 5/5] sql: add index recommendation to execution_insights table part of #81024 Release justification: Category 2: Bug fixes and low-risk updates to new functionality Release note (sql change): Adds index recommendation to execution_insights --- pkg/sql/crdb_internal.go | 11 +- .../testdata/logic_test/create_statements | 12 +- pkg/sql/sqlstats/insights/insights.proto | 2 + .../sqlstats/insights/integration/BUILD.bazel | 1 + .../insights/integration/insights_test.go | 126 ++++++++++++++++-- .../sqlstats/ssmemstorage/ss_mem_writer.go | 37 ++--- 6 files changed, 155 insertions(+), 34 deletions(-) diff --git a/pkg/sql/crdb_internal.go b/pkg/sql/crdb_internal.go index c4537abba12b..43b0400e4be1 100644 --- a/pkg/sql/crdb_internal.go +++ b/pkg/sql/crdb_internal.go @@ -6353,7 +6353,8 @@ CREATE TABLE crdb_internal.%s ( retries INT8 NOT NULL, last_retry_reason STRING, exec_node_ids INT[] NOT NULL, - contention INTERVAL + contention INTERVAL, + index_recommendations STRING[] NOT NULL )` var crdbInternalClusterExecutionInsightsTable = virtualSchemaTable{ @@ -6428,6 +6429,13 @@ func populateExecutionInsights( ) } + indexRecommendations := tree.NewDArray(types.String) + for _, recommendation := range insight.Statement.IndexRecommendation { + if err := indexRecommendations.Append(tree.NewDString(recommendation)); err != nil { + return err + } + } + err = errors.CombineErrors(err, addRow( tree.NewDString(hex.EncodeToString(insight.Session.ID.GetBytes())), tree.NewDUuid(tree.DUuid{UUID: insight.Transaction.ID}), @@ -6450,6 +6458,7 @@ func populateExecutionInsights( autoRetryReason, execNodeIDs, contentionTime, + indexRecommendations, )) } return diff --git a/pkg/sql/logictest/testdata/logic_test/create_statements b/pkg/sql/logictest/testdata/logic_test/create_statements index ff0356e79b1c..87085087c55f 100644 --- 
a/pkg/sql/logictest/testdata/logic_test/create_statements +++ b/pkg/sql/logictest/testdata/logic_test/create_statements @@ -264,7 +264,8 @@ CREATE TABLE crdb_internal.cluster_execution_insights ( retries INT8 NOT NULL, last_retry_reason STRING NULL, exec_node_ids INT8[] NOT NULL, - contention INTERVAL NULL + contention INTERVAL NULL, + index_recommendations STRING[] NOT NULL ) CREATE TABLE crdb_internal.cluster_execution_insights ( session_id STRING NOT NULL, txn_id UUID NOT NULL, @@ -286,7 +287,8 @@ CREATE TABLE crdb_internal.cluster_execution_insights ( retries INT8 NOT NULL, last_retry_reason STRING NULL, exec_node_ids INT8[] NOT NULL, - contention INTERVAL NULL + contention INTERVAL NULL, + index_recommendations STRING[] NOT NULL ) {} {} CREATE TABLE crdb_internal.cluster_inflight_traces ( trace_id INT8 NOT NULL, @@ -976,7 +978,8 @@ CREATE TABLE crdb_internal.node_execution_insights ( retries INT8 NOT NULL, last_retry_reason STRING NULL, exec_node_ids INT8[] NOT NULL, - contention INTERVAL NULL + contention INTERVAL NULL, + index_recommendations STRING[] NOT NULL ) CREATE TABLE crdb_internal.node_execution_insights ( session_id STRING NOT NULL, txn_id UUID NOT NULL, @@ -998,7 +1001,8 @@ CREATE TABLE crdb_internal.node_execution_insights ( retries INT8 NOT NULL, last_retry_reason STRING NULL, exec_node_ids INT8[] NOT NULL, - contention INTERVAL NULL + contention INTERVAL NULL, + index_recommendations STRING[] NOT NULL ) {} {} CREATE TABLE crdb_internal.node_inflight_trace_spans ( trace_id INT8 NOT NULL, diff --git a/pkg/sql/sqlstats/insights/insights.proto b/pkg/sql/sqlstats/insights/insights.proto index 3f7ca257d376..7634c5a17ae6 100644 --- a/pkg/sql/sqlstats/insights/insights.proto +++ b/pkg/sql/sqlstats/insights/insights.proto @@ -65,6 +65,8 @@ message Statement { // Nodes is the ordered list of nodes ids on which the statement was executed. repeated int64 nodes = 17; google.protobuf.Duration contention = 18 [(gogoproto.stdduration) = true]; + repeated string index_recommendation = 19; + } message Insight { diff --git a/pkg/sql/sqlstats/insights/integration/BUILD.bazel b/pkg/sql/sqlstats/insights/integration/BUILD.bazel index f2d6b7ff575a..f66c53ac31db 100644 --- a/pkg/sql/sqlstats/insights/integration/BUILD.bazel +++ b/pkg/sql/sqlstats/insights/integration/BUILD.bazel @@ -13,6 +13,7 @@ go_test( "//pkg/sql/sqlstats/insights", "//pkg/testutils", "//pkg/testutils/serverutils", + "//pkg/testutils/skip", "//pkg/testutils/testcluster", "//pkg/util/leaktest", "//pkg/util/log", diff --git a/pkg/sql/sqlstats/insights/integration/insights_test.go b/pkg/sql/sqlstats/insights/integration/insights_test.go index b13dea8fb068..210bedbca19d 100644 --- a/pkg/sql/sqlstats/insights/integration/insights_test.go +++ b/pkg/sql/sqlstats/insights/integration/insights_test.go @@ -15,6 +15,7 @@ import ( gosql "database/sql" "fmt" "os" + "strings" "sync" "testing" "time" @@ -27,6 +28,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/sqlstats/insights" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" + "github.com/cockroachdb/cockroach/pkg/testutils/skip" "github.com/cockroachdb/cockroach/pkg/testutils/testcluster" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" @@ -44,6 +46,8 @@ func TestInsightsIntegration(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) + const appName = "TestInsightsIntegration" + // Start the cluster. 
(One node is sufficient; the outliers system is currently in-memory only.) ctx := context.Background() settings := cluster.MakeTestingClusterSettings() @@ -56,12 +60,17 @@ func TestInsightsIntegration(t *testing.T) { latencyThreshold := 250 * time.Millisecond insights.LatencyThreshold.Override(ctx, &settings.SV, latencyThreshold) + _, err := conn.ExecContext(ctx, "SET SESSION application_name=$1", appName) + require.NoError(t, err) + // See no recorded insights. var count int - row := conn.QueryRowContext(ctx, "SELECT count(*) FROM crdb_internal.cluster_execution_insights") - err := row.Scan(&count) + var queryText string + row := conn.QueryRowContext(ctx, "SELECT count(*), coalesce(string_agg(query, ';'),'') "+ + "FROM crdb_internal.cluster_execution_insights where app_name = $1 ", appName) + err = row.Scan(&count, &queryText) require.NoError(t, err) - require.Equal(t, 0, count) + require.Equal(t, 0, count, "expect:0, actual:%d, queries:%s", count, queryText) queryDelayInSeconds := 2 * latencyThreshold.Seconds() // Execute a "long-running" statement, running longer than our latencyThreshold. @@ -70,12 +79,13 @@ func TestInsightsIntegration(t *testing.T) { // Eventually see one recorded insight. testutils.SucceedsWithin(t, func() error { - row = conn.QueryRowContext(ctx, "SELECT count(*) FROM crdb_internal.cluster_execution_insights") - if err = row.Scan(&count); err != nil { + row = conn.QueryRowContext(ctx, "SELECT count(*), coalesce(string_agg(query, ';'),'') "+ + "FROM crdb_internal.cluster_execution_insights where app_name = $1 ", appName) + if err = row.Scan(&count, &queryText); err != nil { return err } if count != 1 { - return fmt.Errorf("expected 1, but was %d", count) + return fmt.Errorf("expected 1, but was %d, queryText:%s", count, queryText) } return nil }, 1*time.Second) @@ -88,17 +98,26 @@ func TestInsightsIntegration(t *testing.T) { "start_time, "+ "end_time, "+ "full_scan "+ - "FROM crdb_internal.node_execution_insights") + "FROM crdb_internal.node_execution_insights where "+ + "query = $1 and app_name = $2 ", "SELECT pg_sleep($1)", appName) var query, status string var startInsights, endInsights time.Time var fullScan bool err = row.Scan(&query, &status, &startInsights, &endInsights, &fullScan) - require.NoError(t, err) - require.Equal(t, "SELECT pg_sleep($1)", query) - require.Equal(t, "completed", status) - require.GreaterOrEqual(t, endInsights.Sub(startInsights).Seconds(), queryDelayInSeconds) + if err != nil { + return err + } + + if status != "completed" { + return fmt.Errorf("expected 'completed', but was %s", status) + } + + delayFromTable := endInsights.Sub(startInsights).Seconds() + if delayFromTable < queryDelayInSeconds { + return fmt.Errorf("expected at least %f, but was %f", delayFromTable, queryDelayInSeconds) + } return nil }, 1*time.Second) @@ -199,3 +218,88 @@ func TestInsightsIntegrationForContention(t *testing.T) { return nil }, 1*time.Second) } + +// Testing that the index recommendation is included +// in the insights table +func TestInsightsIndexRecommendationIntegration(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + + skip.UnderStressRace(t, "expensive tests") + + ctx := context.Background() + settings := cluster.MakeTestingClusterSettings() + args := base.TestClusterArgs{ServerArgs: base.TestServerArgs{Settings: settings}} + tc := testcluster.StartTestCluster(t, 1, args) + defer tc.Stopper().Stop(ctx) + + // Enable detection by setting a latencyThreshold > 0. 
+ latencyThreshold := 30 * time.Millisecond + insights.LatencyThreshold.Override(ctx, &settings.SV, latencyThreshold) + + sqlConn := tc.ServerConn(0) + + _, err := sqlConn.ExecContext(ctx, "CREATE TABLE t1 (k INT, i INT, f FLOAT, s STRING)") + require.NoError(t, err) + _, err = sqlConn.ExecContext(ctx, "CREATE TABLE t2 (k INT, i INT, s STRING)") + require.NoError(t, err) + + query := "SELECT t1.k FROM t1 JOIN t2 ON t1.k = t2.k WHERE t1.i > 3 AND t2.i > 3" + + // Execute enough times to have index recommendations generated. + // This will generate two recommendations. + for i := 0; i < 10; i++ { + tx, err := sqlConn.BeginTx(ctx, &gosql.TxOptions{}) + require.NoError(t, err) + _, err = tx.ExecContext(ctx, "select pg_sleep(.05);") + require.NoError(t, err) + _, err = tx.ExecContext(ctx, query) + require.NoError(t, err) + err = tx.Commit() + require.NoError(t, err) + } + + // Verify the table content is valid. + testutils.SucceedsWithin(t, func() error { + rows, err := sqlConn.QueryContext(ctx, "SELECT "+ + "query, "+ + "array_to_string(index_recommendations, ';') as cmb_index_recommendations "+ + "FROM crdb_internal.node_execution_insights "+ + "where array_length(index_recommendations, 1) > 0 and "+ + "query like 'SELECT t1.k FROM t1 JOIN t2 ON t1.k = t2.k%' ") + + if err != nil { + return err + } + + var rowCount int + for rows.Next() { + var query string + var idxRecommendation string + err := rows.Scan(&query, &idxRecommendation) + if err != nil { + return err + } + + if query != "SELECT t1.k FROM t1 JOIN t2 ON t1.k = t2.k WHERE (t1.i > _) AND (t2.i > _)" { + return fmt.Errorf("'SELECT t1.k FROM t1 JOIN t2 ON t1.k = t2.k WHERE (t1.i > _) AND (t2.i > _)' should be %s", query) + } + + if idxRecommendation == "" { + return fmt.Errorf("index recommendation should not be empty '%s'", idxRecommendation) + } + + if !strings.Contains(idxRecommendation, "CREATE INDEX") { + return fmt.Errorf("index recommendation should contain 'CREATE INDEX' actual:'%s'", idxRecommendation) + } + + rowCount++ + } + + if rowCount < 1 { + return fmt.Errorf("no rows found with index recommendation") + } + + return nil + }, 1*time.Second) +} diff --git a/pkg/sql/sqlstats/ssmemstorage/ss_mem_writer.go b/pkg/sql/sqlstats/ssmemstorage/ss_mem_writer.go index 80601ee6fa23..0d826574476f 100644 --- a/pkg/sql/sqlstats/ssmemstorage/ss_mem_writer.go +++ b/pkg/sql/sqlstats/ssmemstorage/ss_mem_writer.go @@ -182,24 +182,25 @@ func (s *Container) RecordStatement( } s.outliersRegistry.ObserveStatement(value.SessionID, &insights.Statement{ - ID: value.StatementID, - FingerprintID: stmtFingerprintID, - LatencyInSeconds: value.ServiceLatency, - Query: value.Query, - Status: getStatusString(value.StatementError), - StartTime: value.StartTime, - EndTime: value.EndTime, - FullScan: value.FullScan, - User: value.SessionData.User().Normalized(), - ApplicationName: value.SessionData.ApplicationName, - Database: value.SessionData.Database, - PlanGist: value.PlanGist, - Retries: int64(value.AutoRetryCount), - AutoRetryReason: autoRetryReason, - RowsRead: value.RowsRead, - RowsWritten: value.RowsWritten, - Nodes: value.Nodes, - Contention: contention, + ID: value.StatementID, + FingerprintID: stmtFingerprintID, + LatencyInSeconds: value.ServiceLatency, + Query: value.Query, + Status: getStatusString(value.StatementError), + StartTime: value.StartTime, + EndTime: value.EndTime, + FullScan: value.FullScan, + User: value.SessionData.User().Normalized(), + ApplicationName: value.SessionData.ApplicationName, + Database: value.SessionData.Database, + 
PlanGist: value.PlanGist, + Retries: int64(value.AutoRetryCount), + AutoRetryReason: autoRetryReason, + RowsRead: value.RowsRead, + RowsWritten: value.RowsWritten, + Nodes: value.Nodes, + Contention: contention, + IndexRecommendation: value.IndexRecommendations, }) return stats.ID, nil